vendor: run make vendor-update

Aliaksandr Valialkin 2023-12-11 10:48:36 +02:00
parent 635da5fab7
commit 51df2248f0
No known key found for this signature in database
GPG key ID: 52C003EE2BCDB9EB
305 changed files with 13590 additions and 4556 deletions

84
go.mod
View file

@ -13,10 +13,10 @@ require (
github.com/VictoriaMetrics/fasthttp v1.2.0
github.com/VictoriaMetrics/metrics v1.25.3
github.com/VictoriaMetrics/metricsql v0.70.0
github.com/aws/aws-sdk-go-v2 v1.23.0
github.com/aws/aws-sdk-go-v2/config v1.25.1
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.8
github.com/aws/aws-sdk-go-v2/service/s3 v1.42.2
github.com/aws/aws-sdk-go-v2 v1.24.0
github.com/aws/aws-sdk-go-v2/config v1.26.1
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.7
github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5
github.com/bmatcuk/doublestar/v4 v4.6.1
github.com/cespare/xxhash/v2 v2.2.0
github.com/cheggaaa/pb/v3 v3.1.4
@ -24,48 +24,48 @@ require (
github.com/golang/snappy v0.0.4
github.com/googleapis/gax-go/v2 v2.12.0
github.com/influxdata/influxdb v1.11.2
github.com/klauspost/compress v1.17.3
github.com/prometheus/prometheus v0.48.0
github.com/urfave/cli/v2 v2.25.7
github.com/klauspost/compress v1.17.4
github.com/prometheus/prometheus v0.48.1
github.com/urfave/cli/v2 v2.26.0
github.com/valyala/fastjson v1.6.4
github.com/valyala/fastrand v1.1.0
github.com/valyala/fasttemplate v1.2.2
github.com/valyala/gozstd v1.20.1
github.com/valyala/histogram v1.2.0
github.com/valyala/quicktemplate v1.7.0
golang.org/x/net v0.18.0
golang.org/x/oauth2 v0.14.0
golang.org/x/net v0.19.0
golang.org/x/oauth2 v0.15.0
golang.org/x/sys v0.15.0
google.golang.org/api v0.150.0
google.golang.org/api v0.153.0
gopkg.in/yaml.v2 v2.4.0
)
require (
cloud.google.com/go v0.110.10 // indirect
cloud.google.com/go v0.111.0 // indirect
cloud.google.com/go/compute v1.23.3 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/iam v1.1.5 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
github.com/aws/aws-sdk-go v1.47.12 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.1 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.16.1 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.4 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.3 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.3 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.17.2 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.2 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.25.2 // indirect
github.com/aws/smithy-go v1.17.0 // indirect
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
github.com/aws/aws-sdk-go v1.48.16 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.16.12 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.9 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.18.5 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.26.5 // indirect
github.com/aws/smithy-go v1.19.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@ -76,7 +76,7 @@ require (
github.com/go-logfmt/logfmt v0.6.0 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/golang-jwt/jwt/v5 v5.1.0 // indirect
github.com/golang-jwt/jwt/v5 v5.2.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/s2a-go v0.1.7 // indirect
@ -109,25 +109,25 @@ require (
github.com/valyala/bytebufferpool v1.0.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/collector/pdata v1.0.0-rcv0018 // indirect
go.opentelemetry.io/collector/semconv v0.89.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 // indirect
go.opentelemetry.io/otel v1.20.0 // indirect
go.opentelemetry.io/otel/metric v1.20.0 // indirect
go.opentelemetry.io/otel/trace v1.20.0 // indirect
go.opentelemetry.io/collector/pdata v1.0.0 // indirect
go.opentelemetry.io/collector/semconv v0.90.1 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
go.opentelemetry.io/otel v1.21.0 // indirect
go.opentelemetry.io/otel/metric v1.21.0 // indirect
go.opentelemetry.io/otel/trace v1.21.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/goleak v1.3.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/crypto v0.15.0 // indirect
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa // indirect
golang.org/x/crypto v0.16.0 // indirect
golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb // indirect
golang.org/x/sync v0.5.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.4.0 // indirect
golang.org/x/time v0.5.0 // indirect
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect
google.golang.org/genproto v0.0.0-20231127180814-3a041ad873d4 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect
google.golang.org/grpc v1.59.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect

173
go.sum
View file

@ -13,8 +13,8 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y=
cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic=
cloud.google.com/go v0.111.0 h1:YHLKNupSD1KqjDbQ3+LVdQ81h/UJbJyZG203cEfnQgM=
cloud.google.com/go v0.111.0/go.mod h1:0mibmpKP1TyOOFYQY5izo0LnT+ecvOQ0Sg3OdmMiNRU=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@ -45,8 +45,8 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 h1:d81/ng9rET2YqdVkVwkb6EXeRrLJIwyGnJcAlAWKwhs=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 h1:UPeCRD+XY7QlaGQte2EVI2iOcWvUYA2XY8w5T/8v0NQ=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.1.0 h1:QM6sE5k2ZT/vI5BEe0r7mqjsUSnhVBFbOsVkEuaEfiA=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2.1 h1:bWh0Z2rOEDfB/ywv/l0iHN1JgyazE6kW/aIA89+CEK0=
@ -74,54 +74,54 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc=
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs=
github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.47.12 h1:1daICVijigVEXCzhg27A5d7hbkR4wODPGn9GHyBclKM=
github.com/aws/aws-sdk-go v1.47.12/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go-v2 v1.23.0 h1:PiHAzmiQQr6JULBUdvR8fKlA+UPKLT/8KbiqpFBWiAo=
github.com/aws/aws-sdk-go-v2 v1.23.0/go.mod h1:i1XDttT4rnf6vxc9AuskLc6s7XBee8rlLilKlc03uAA=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.1 h1:ZY3108YtBNq96jNZTICHxN1gSBSbnvIdYwwqnvCV4Mc=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.1/go.mod h1:t8PYl/6LzdAqsU4/9tz28V/kU+asFePvpOMkdul0gEQ=
github.com/aws/aws-sdk-go-v2/config v1.25.1 h1:YsjngBOl2mx4l3egkVWndr6/6TqtkdsWJFZIsQ924Ek=
github.com/aws/aws-sdk-go-v2/config v1.25.1/go.mod h1:yV6h7TRVzhdIFmUk9WWDRpWwYGg1woEzKr0k1IYz2Tk=
github.com/aws/aws-sdk-go-v2/credentials v1.16.1 h1:WessyrdgyFN5TB+eLQdrFSlN/3oMnqukIFhDxK6z8h0=
github.com/aws/aws-sdk-go-v2/credentials v1.16.1/go.mod h1:RQJyPxKcr+m4ArlIG1LUhMOrjposVfzbX6H8oR6oCgE=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.4 h1:9wKDWEjwSnXZre0/O3+ZwbBl1SmlgWYBbrTV10X/H1s=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.4/go.mod h1:t4i+yGHMCcUNIX1x7YVYa6bH/Do7civ5I6cG/6PMfyA=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.8 h1:wuOjvalpd2CnXffks74Vq6n3yv9vunKCoy4R1sjStGk=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.8/go.mod h1:vywwjy6VnrR48Izg136JoSUXC4mH9QeUi3g0EH9DSrA=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.3 h1:DUwbD79T8gyQ23qVXFUthjzVMTviSHi3y4z58KvghhM=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.3/go.mod h1:7sGSz1JCKHWWBHq98m6sMtWQikmYPpxjqOydDemiVoM=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.3 h1:AplLJCtIaUZDCbr6+gLYdsYNxne4iuaboJhVt9d+WXI=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.3/go.mod h1:ify42Rb7nKeDDPkFjKn7q1bPscVPu/+gmHH8d2c+anU=
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.0 h1:usgqiJtamuGIBj+OvYmMq89+Z1hIKkMJToz1WpoeNUY=
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.0/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.3 h1:lMwCXiWJlrtZot0NJTjbC8G9zl+V3i68gBTBBvDeEXA=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.3/go.mod h1:5yzAuE9i2RkVAttBl8yxZgQr5OCq4D5yDnG7j9x2L0U=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1 h1:rpkF4n0CyFcrJUG/rNNohoTmhtWlFTRI4BsZOh9PvLs=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1/go.mod h1:l9ymW25HOqymeU2m1gbUQ3rUIsTwKs8gYHXkqDQUhiI=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.3 h1:xbwRyCy7kXrOj89iIKLB6NfE2WCpP9HoKyk8dMDvnIQ=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.3/go.mod h1:R+/S1O4TYpcktbVwddeOYg+uwUfLhADP2S/x4QwsCTM=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.3 h1:kJOolE8xBAD13xTCgOakByZkyP4D/owNmvEiioeUNAg=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.3/go.mod h1:Owv1I59vaghv1Ax8zz8ELY8DN7/Y0rGS+WWAmjgi950=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.3 h1:KV0z2RDc7euMtg8aUT1czv5p29zcLlXALNFsd3jkkEc=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.3/go.mod h1:KZgs2ny8HsxRIRbDwgvJcHHBZPOzQr/+NtGwnP+w2ec=
github.com/aws/aws-sdk-go-v2/service/s3 v1.42.2 h1:NnduxUd9+Fq9DcCDdJK8v6l9lR1xDX4usvog+JuQAno=
github.com/aws/aws-sdk-go-v2/service/s3 v1.42.2/go.mod h1:NXRKkiRF+erX2hnybnVU660cYT5/KChRD4iUgJ97cI8=
github.com/aws/aws-sdk-go-v2/service/sso v1.17.2 h1:V47N5eKgVZoRSvx2+RQ0EpAEit/pqOhqeSQFiS4OFEQ=
github.com/aws/aws-sdk-go-v2/service/sso v1.17.2/go.mod h1:/pE21vno3q1h4bbhUOEi+6Zu/aT26UK2WKkDXd+TssQ=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.2 h1:sMAcO7VHVw28HTAdZpTULDzFirHOsVm/x25CxhUH0jA=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.2/go.mod h1:dWqm5G767qwKPuayKfzm4rjzFmVjiBFbOJrpSPnAMDs=
github.com/aws/aws-sdk-go-v2/service/sts v1.25.2 h1:vwyiRTnXLqsak/6WAQ+uTRhVqKI6vxUQ0HJXjKij0zM=
github.com/aws/aws-sdk-go-v2/service/sts v1.25.2/go.mod h1:4EqRHDCKP78hq3zOnmFXu5k0j4bXbRFfCh/zQ6KnEfQ=
github.com/aws/smithy-go v1.17.0 h1:wWJD7LX6PBV6etBUwO0zElG0nWN9rUhp0WdYeHSHAaI=
github.com/aws/smithy-go v1.17.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
github.com/aws/aws-sdk-go v1.48.16 h1:mcj2/9J/MJ55Dov+ocMevhR8Jv6jW/fAxbrn4a1JFc8=
github.com/aws/aws-sdk-go v1.48.16/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/aws/aws-sdk-go-v2 v1.24.0 h1:890+mqQ+hTpNuw0gGP6/4akolQkSToDJgHfQE7AwGuk=
github.com/aws/aws-sdk-go-v2 v1.24.0/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4/go.mod h1:usURWEKSNNAcAZuzRn/9ZYPT8aZQkR7xcCtunK/LkJo=
github.com/aws/aws-sdk-go-v2/config v1.26.1 h1:z6DqMxclFGL3Zfo+4Q0rLnAZ6yVkzCRxhRMsiRQnD1o=
github.com/aws/aws-sdk-go-v2/config v1.26.1/go.mod h1:ZB+CuKHRbb5v5F0oJtGdhFTelmrxd4iWO1lf0rQwSAg=
github.com/aws/aws-sdk-go-v2/credentials v1.16.12 h1:v/WgB8NxprNvr5inKIiVVrXPuuTegM+K8nncFkr1usU=
github.com/aws/aws-sdk-go-v2/credentials v1.16.12/go.mod h1:X21k0FjEJe+/pauud82HYiQbEr9jRKY3kXEIQ4hXeTQ=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 h1:w98BT5w+ao1/r5sUuiH6JkVzjowOKeOJRHERyy1vh58=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10/go.mod h1:K2WGI7vUvkIv1HoNbfBA1bvIZ+9kL3YVmWxeKuLQsiw=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.7 h1:FnLf60PtjXp8ZOzQfhJVsqF0OtYKQZWQfqOLshh8YXg=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.7/go.mod h1:tDVvl8hyU6E9B8TrnNrZQEVkQlB8hjJwcgpPhgtlnNg=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9 h1:v+HbZaCGmOwnTTVS86Fleq0vPzOd7tnJGbFhP0stNLs=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9/go.mod h1:Xjqy+Nyj7VDLBtCMkQYOw1QYfAEZCVLrfI0ezve8wd4=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9 h1:N94sVhRACtXyVcjXxrwK1SKFIJrA9pOJ5yu2eSHnmls=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9/go.mod h1:hqamLz7g1/4EJP+GH5NBhcUMLjW+gKLQabgyz6/7WAU=
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9 h1:ugD6qzjYtB7zM5PN/ZIeaAIyefPaD82G8+SJopgvUpw=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9/go.mod h1:YD0aYBWCrPENpHolhKw2XDlTIWae2GKXT1T4o6N6hiM=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.9 h1:/90OR2XbSYfXucBMJ4U14wrjlfleq/0SB6dZDPncgmo=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.9/go.mod h1:dN/Of9/fNZet7UrQQ6kTDo/VSwKPIq94vjlU16bRARc=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 h1:Nf2sHxjMJR8CSImIVCONRi4g0Su3J+TSTbS7G0pUeMU=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9/go.mod h1:idky4TER38YIjr2cADF1/ugFMKvZV7p//pVeV5LZbF0=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9 h1:iEAeF6YC3l4FzlJPP9H3Ko1TXpdjdqWffxXjp8SY6uk=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9/go.mod h1:kjsXoK23q9Z/tLBrckZLLyvjhZoS+AGrzqzUfEClvMM=
github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5 h1:Keso8lIOS+IzI2MkPZyK6G0LYcK3My2LQ+T5bxghEAY=
github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5/go.mod h1:vADO6Jn+Rq4nDtfwNjhgR84qkZwiC6FqCaXdw/kYwjA=
github.com/aws/aws-sdk-go-v2/service/sso v1.18.5 h1:ldSFWz9tEHAwHNmjx2Cvy1MjP5/L9kNoR0skc6wyOOM=
github.com/aws/aws-sdk-go-v2/service/sso v1.18.5/go.mod h1:CaFfXLYL376jgbP7VKC96uFcU8Rlavak0UlAwk1Dlhc=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5 h1:2k9KmFawS63euAkY4/ixVNsYYwrwnd5fIvgEKkfZFNM=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5/go.mod h1:W+nd4wWDVkSUIox9bacmkBP5NMFQeTJ/xqNabpzSR38=
github.com/aws/aws-sdk-go-v2/service/sts v1.26.5 h1:5UYvv8JUvllZsRnfrcMQ+hJ9jNICmcgKPAO1CER25Wg=
github.com/aws/aws-sdk-go-v2/service/sts v1.26.5/go.mod h1:XX5gh4CB7wAs4KhcF46G6C8a2i7eupU19dcAAE+EydU=
github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM=
github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@ -194,8 +194,8 @@ github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v5 v5.1.0 h1:UGKbA/IPjtS6zLcdB7i5TyACMgSbOTiR8qzXgw8HWQU=
github.com/golang-jwt/jwt/v5 v5.1.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw=
github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@ -314,8 +314,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA=
github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@ -393,8 +393,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/prometheus/prometheus v0.48.0 h1:yrBloImGQ7je4h8M10ujGh4R6oxYQJQKlMuETwNskGk=
github.com/prometheus/prometheus v0.48.0/go.mod h1:SRw624aMAxTfryAcP8rOjg4S/sHHaetx2lyJJ2nM83g=
github.com/prometheus/prometheus v0.48.1 h1:CTszphSNTXkuCG6O0IfpKdHcJkvvnAAE1GbELKS+NFk=
github.com/prometheus/prometheus v0.48.1/go.mod h1:SRw624aMAxTfryAcP8rOjg4S/sHHaetx2lyJJ2nM83g=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@ -421,8 +421,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/urfave/cli/v2 v2.26.0 h1:3f3AMg3HpThFNT4I++TKOejZO8yU55t3JnnSr4S4QEI=
github.com/urfave/cli/v2 v2.26.0/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
@ -454,18 +454,19 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/collector/pdata v1.0.0-rcv0018 h1:a2IHOZKphRzPagcvOHQHHUE0DlITFSKlIBwaWhPZpl4=
go.opentelemetry.io/collector/pdata v1.0.0-rcv0018/go.mod h1:oNIcTRyEJYIfMcRYyyh5lquDU0Vl+ktTL6ka+p+dYvg=
go.opentelemetry.io/collector/semconv v0.89.0 h1:Sw+MiI3/oiYIY+ebkanZsOaBxXMx3sqnH1/6NaD4rLQ=
go.opentelemetry.io/collector/semconv v0.89.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 h1:1eHu3/pUSWaOgltNK3WJFaywKsTIr/PwvHyDmi0lQA0=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0/go.mod h1:HyABWq60Uy1kjJSa2BVOxUVao8Cdick5AWSKPutqy6U=
go.opentelemetry.io/otel v1.20.0 h1:vsb/ggIY+hUjD/zCAQHpzTmndPqv/ml2ArbsbfBYTAc=
go.opentelemetry.io/otel v1.20.0/go.mod h1:oUIGj3D77RwJdM6PPZImDpSZGDvkD9fhesHny69JFrs=
go.opentelemetry.io/otel/metric v1.20.0 h1:ZlrO8Hu9+GAhnepmRGhSU7/VkpjrNowxRN9GyKR4wzA=
go.opentelemetry.io/otel/metric v1.20.0/go.mod h1:90DRw3nfK4D7Sm/75yQ00gTJxtkBxX+wu6YaNymbpVM=
go.opentelemetry.io/otel/trace v1.20.0 h1:+yxVAPZPbQhbC3OfAkeIVTky6iTFpcr4SiY9om7mXSQ=
go.opentelemetry.io/otel/trace v1.20.0/go.mod h1:HJSK7F/hA5RlzpZ0zKDCHCDHm556LCDtKaAo6JmBFUU=
go.opentelemetry.io/collector/pdata v1.0.0 h1:ECP2jnLztewsHmL1opL8BeMtWVc7/oSlKNhfY9jP8ec=
go.opentelemetry.io/collector/pdata v1.0.0/go.mod h1:TsDFgs4JLNG7t6x9D8kGswXUz4mme+MyNChHx8zSF6k=
go.opentelemetry.io/collector/semconv v0.90.1 h1:2fkQZbefQBbIcNb9Rk1mRcWlFZgQOk7CpST1e1BK8eg=
go.opentelemetry.io/collector/semconv v0.90.1/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o=
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@ -480,8 +481,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA=
golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -492,8 +493,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ=
golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE=
golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb h1:c0vyKkb6yr3KR7jEfJaOSv4lG7xPkbN6r52aJz1d8a8=
golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -550,16 +551,16 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0=
golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM=
golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ=
golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -623,7 +624,7 @@ golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8=
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -637,8 +638,8 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.4.0 h1:Z81tqI5ddIoXDPvVQ7/7CC9TnLM7ubaFG2qXYd5BbYY=
golang.org/x/time v0.4.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@ -682,7 +683,7 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8=
golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -705,8 +706,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.150.0 h1:Z9k22qD289SZ8gCJrk4DrWXkNjtfvKAUo/l1ma8eBYE=
google.golang.org/api v0.150.0/go.mod h1:ccy+MJ6nrYFgE3WgRx/AMXOxOmU8Q4hSa+jjibzhxcg=
google.golang.org/api v0.153.0 h1:N1AwGhielyKFaUqH07/ZSIQR3uNPcV7NVw0vj+j4iR4=
google.golang.org/api v0.153.0/go.mod h1:3qNJX5eOmhiWYc67jRA/3GsDw97UFb5ivv7Y2PrriAY=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -744,12 +745,12 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ=
google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY=
google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo=
google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA=
google.golang.org/genproto v0.0.0-20231127180814-3a041ad873d4 h1:W12Pwm4urIbRdGhMEg2NM9O3TWKjNcxQhs46V0ypf/k=
google.golang.org/genproto v0.0.0-20231127180814-3a041ad873d4/go.mod h1:5RBcpGRxr25RbDzY5w+dmaqpSEvl8Gwl1x2CICf60ic=
google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4 h1:ZcOkrmX74HbKFYnpPY8Qsw93fC29TbJXspYKaBkSXDQ=
google.golang.org/genproto/googleapis/api v0.0.0-20231127180814-3a041ad873d4/go.mod h1:k2dtGpRrbsSyKcNPKKI5sstZkrNCZwpU/ns96JoHbGg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=

View file

@ -183,7 +183,7 @@ func (fs *FS) ListParts() ([]common.Part, error) {
continue
}
p.ActualSize = uint64(o.Size)
p.ActualSize = uint64(*o.Size)
parts = append(parts, p)
}
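
Note: the dereference above shows that the object's Size field is now a pointer (*int64) in the updated aws-sdk-go-v2 S3 client, so the value has to be dereferenced before conversion. A purely illustrative, nil-guarded variant of the same conversion follows; the helper name and the nil-as-zero behavior are assumptions, not taken from the repository.

// sizeOf converts a pointer-typed S3 object Size to uint64, treating a
// missing value as zero. Illustrative only.
func sizeOf(size *int64) uint64 {
	if size == nil {
		return 0
	}
	return uint64(*size)
}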

View file

@ -619,6 +619,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/cloudprofiler/apiv2": {
"api_shortname": "cloudprofiler",
"distribution_name": "cloud.google.com/go/cloudprofiler/apiv2",
"description": "Cloud Profiler API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudprofiler/latest/apiv2",
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/cloudtasks/apiv2": {
"api_shortname": "cloudtasks",
"distribution_name": "cloud.google.com/go/cloudtasks/apiv2",
@ -1019,6 +1029,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/edgenetwork/apiv1": {
"api_shortname": "edgenetwork",
"distribution_name": "cloud.google.com/go/edgenetwork/apiv1",
"description": "Distributed Cloud Edge Network API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/edgenetwork/latest/apiv1",
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/errorreporting": {
"api_shortname": "clouderrorreporting",
"distribution_name": "cloud.google.com/go/errorreporting",
@ -1099,6 +1119,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/firestore/apiv1/admin": {
"api_shortname": "firestore",
"distribution_name": "cloud.google.com/go/firestore/apiv1/admin",
"description": "Cloud Firestore API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/firestore/latest/apiv1/admin",
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/functions/apiv1": {
"api_shortname": "cloudfunctions",
"distribution_name": "cloud.google.com/go/functions/apiv1",
@ -2279,6 +2309,16 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/telcoautomation/apiv1": {
"api_shortname": "telcoautomation",
"distribution_name": "cloud.google.com/go/telcoautomation/apiv1",
"description": "Telco Automation API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/telcoautomation/latest/apiv1",
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/texttospeech/apiv1": {
"api_shortname": "texttospeech",
"distribution_name": "cloud.google.com/go/texttospeech/apiv1",

View file

@ -16,35 +16,94 @@ package trace
import (
"context"
"errors"
"fmt"
"os"
"strings"
"go.opencensus.io/trace"
"golang.org/x/xerrors"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
ottrace "go.opentelemetry.io/otel/trace"
"google.golang.org/api/googleapi"
"google.golang.org/genproto/googleapis/rpc/code"
"google.golang.org/grpc/status"
)
// StartSpan adds a span to the trace with the given name.
const (
telemetryPlatformTracingOpenCensus = "opencensus"
telemetryPlatformTracingOpenTelemetry = "opentelemetry"
telemetryPlatformTracingVar = "GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING"
)
var (
// TODO(chrisdsmith): Should the name of the OpenTelemetry tracer be public and mutable?
openTelemetryTracerName string = "cloud.google.com/go"
openTelemetryTracingEnabled bool = strings.EqualFold(strings.TrimSpace(
os.Getenv(telemetryPlatformTracingVar)), telemetryPlatformTracingOpenTelemetry)
)
// IsOpenCensusTracingEnabled returns true if the environment variable
// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is NOT set to the
// case-insensitive value "opentelemetry".
func IsOpenCensusTracingEnabled() bool {
return !IsOpenTelemetryTracingEnabled()
}
// IsOpenTelemetryTracingEnabled returns true if the environment variable
// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the
// case-insensitive value "opentelemetry".
func IsOpenTelemetryTracingEnabled() bool {
return openTelemetryTracingEnabled
}
// StartSpan adds a span to the trace with the given name. If IsOpenCensusTracingEnabled
// returns true, the span will be an OpenCensus span. If IsOpenTelemetryTracingEnabled
// returns true, the span will be an OpenTelemetry span. Set the environment variable
// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
// value "opentelemetry" before loading the package to use OpenTelemetry tracing.
// The default will remain OpenCensus until [TBD], at which time the default will
// switch to "opentelemetry" and explicitly setting the environment variable to
// "opencensus" will be required to continue using OpenCensus tracing.
func StartSpan(ctx context.Context, name string) context.Context {
ctx, _ = trace.StartSpan(ctx, name)
if IsOpenTelemetryTracingEnabled() {
ctx, _ = otel.GetTracerProvider().Tracer(openTelemetryTracerName).Start(ctx, name)
} else {
ctx, _ = trace.StartSpan(ctx, name)
}
return ctx
}
// EndSpan ends a span with the given error.
// EndSpan ends a span with the given error. If IsOpenCensusTracingEnabled
// returns true, the span will be an OpenCensus span. If IsOpenTelemetryTracingEnabled
// returns true, the span will be an OpenTelemetry span. Set the environment variable
// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
// value "opentelemetry" before loading the package to use OpenTelemetry tracing.
// The default will remain OpenCensus until [TBD], at which time the default will
// switch to "opentelemetry" and explicitly setting the environment variable to
// "opencensus" will be required to continue using OpenCensus tracing.
func EndSpan(ctx context.Context, err error) {
span := trace.FromContext(ctx)
if err != nil {
span.SetStatus(toStatus(err))
if IsOpenTelemetryTracingEnabled() {
span := ottrace.SpanFromContext(ctx)
if err != nil {
span.SetStatus(codes.Error, toOpenTelemetryStatusDescription(err))
span.RecordError(err)
}
span.End()
} else {
span := trace.FromContext(ctx)
if err != nil {
span.SetStatus(toStatus(err))
}
span.End()
}
span.End()
}
// toStatus interrogates an error and converts it to an appropriate
// OpenCensus status.
// toStatus converts an error to an equivalent OpenCensus status.
func toStatus(err error) trace.Status {
var err2 *googleapi.Error
if ok := xerrors.As(err, &err2); ok {
if ok := errors.As(err, &err2); ok {
return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message}
} else if s, ok := status.FromError(err); ok {
return trace.Status{Code: int32(s.Code()), Message: s.Message()}
@ -53,6 +112,18 @@ func toStatus(err error) trace.Status {
}
}
// toOpenTelemetryStatus converts an error to an equivalent OpenTelemetry status description.
func toOpenTelemetryStatusDescription(err error) string {
var err2 *googleapi.Error
if ok := errors.As(err, &err2); ok {
return err2.Message
} else if s, ok := status.FromError(err); ok {
return s.Message()
} else {
return err.Error()
}
}
// TODO(deklerk): switch to using OpenCensus function when it becomes available.
// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto
func httpStatusCodeToOCCode(httpStatusCode int) int32 {
@ -86,10 +157,33 @@ func httpStatusCodeToOCCode(httpStatusCode int) int32 {
}
}
// TODO: (odeke-em): perhaps just pass around spans due to the cost
// incurred from using trace.FromContext(ctx) yet we could avoid
// throwing away the work done by ctx, span := trace.StartSpan.
// TracePrintf retrieves the current OpenCensus or OpenTelemetry span from context, then:
// * calls Span.Annotatef if OpenCensus is enabled; or
// * calls Span.AddEvent if OpenTelemetry is enabled.
//
// If IsOpenCensusTracingEnabled returns true, the expected span must be an
// OpenCensus span. If IsOpenTelemetryTracingEnabled returns true, the expected
// span must be an OpenTelemetry span. Set the environment variable
// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
// value "opentelemetry" before loading the package to use OpenTelemetry tracing.
// The default will remain OpenCensus until [TBD], at which time the default will
// switch to "opentelemetry" and explicitly setting the environment variable to
// "opencensus" will be required to continue using OpenCensus tracing.
func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) {
if IsOpenTelemetryTracingEnabled() {
attrs := otAttrs(attrMap)
ottrace.SpanFromContext(ctx).AddEvent(fmt.Sprintf(format, args...), ottrace.WithAttributes(attrs...))
} else {
attrs := ocAttrs(attrMap)
// TODO: (odeke-em): perhaps just pass around spans due to the cost
// incurred from using trace.FromContext(ctx) yet we could avoid
// throwing away the work done by ctx, span := trace.StartSpan.
trace.FromContext(ctx).Annotatef(attrs, format, args...)
}
}
// ocAttrs converts a generic map to OpenCensus attributes.
func ocAttrs(attrMap map[string]interface{}) []trace.Attribute {
var attrs []trace.Attribute
for k, v := range attrMap {
var a trace.Attribute
@ -107,5 +201,27 @@ func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format str
}
attrs = append(attrs, a)
}
trace.FromContext(ctx).Annotatef(attrs, format, args...)
return attrs
}
// otAttrs converts a generic map to OpenTelemetry attributes.
func otAttrs(attrMap map[string]interface{}) []attribute.KeyValue {
var attrs []attribute.KeyValue
for k, v := range attrMap {
var a attribute.KeyValue
switch v := v.(type) {
case string:
a = attribute.Key(k).String(v)
case bool:
a = attribute.Key(k).Bool(v)
case int:
a = attribute.Key(k).Int(v)
case int64:
a = attribute.Key(k).Int64(v)
default:
a = attribute.Key(k).String(fmt.Sprintf("%#v", v))
}
attrs = append(attrs, a)
}
return attrs
}
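
The rewritten trace helper above (google.golang.org/api's internal trace package, judging by the vendored layout) now chooses between OpenCensus and OpenTelemetry once, at package load, based on GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING. A minimal sketch of the call pattern it supports, assuming a hypothetical wrapper inside that module (application code cannot import an internal package directly, and the import alias and span name are placeholders):

package example

import (
	"context"

	itrace "google.golang.org/api/internal/trace" // import path assumed from the vendored layout
)

// doCall is a hypothetical wrapper showing the StartSpan/EndSpan/TracePrintf
// pattern. Whether the span is an OpenCensus or an OpenTelemetry span is
// decided by GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING, read once
// when the trace package is loaded.
func doCall(ctx context.Context, do func(context.Context) error) (err error) {
	ctx = itrace.StartSpan(ctx, "google.cloud.example/DoCall")
	defer func() { itrace.EndSpan(ctx, err) }()

	itrace.TracePrintf(ctx, map[string]interface{}{"attempt": 1}, "issuing request")
	return do(ctx)
}

Until the [TBD] date mentioned in the doc comments, OpenCensus remains the default, so this sketch behaves exactly like the old code unless the environment variable is set to "opentelemetry" before the process starts.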

11
vendor/github.com/alecthomas/units/renovate.json5 generated vendored Normal file
View file

@ -0,0 +1,11 @@
{
$schema: "https://docs.renovatebot.com/renovate-schema.json",
extends: [
"config:recommended",
":semanticCommits",
":semanticCommitTypeAll(chore)",
":semanticCommitScope(deps)",
"group:allNonMajor",
"schedule:earlyMondays", // Run once a week.
],
}

View file

@ -150,6 +150,18 @@ type Config struct {
// BaseEndpoint is an intermediary transfer location to a service specific
// BaseEndpoint on a service's Options.
BaseEndpoint *string
// DisableRequestCompression toggles if an operation request could be
// compressed or not. Will be set to false by default. This variable is sourced from
// environment variable AWS_DISABLE_REQUEST_COMPRESSION or the shared config profile attribute
// disable_request_compression
DisableRequestCompression bool
// RequestMinCompressSizeBytes sets the inclusive min bytes of a request body that could be
// compressed. Will be set to 10240 by default and must be within 0 and 10485760 bytes inclusively.
// This variable is sourced from environment variable AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES or
// the shared config profile attribute request_min_compression_size_bytes
RequestMinCompressSizeBytes int64
}
// NewConfig returns a new Config pointer that can be chained with builder
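
The two new fields above live on aws.Config itself, so they can be set directly or sourced from AWS_DISABLE_REQUEST_COMPRESSION / AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES or the shared config profile. A hedged sketch using only what the hunk documents (the helper name and region value are placeholders):

package example

import "github.com/aws/aws-sdk-go-v2/aws"

// newConfig builds an aws.Config with the request-compression knobs set
// explicitly instead of via environment variables or the shared profile.
func newConfig(region string) aws.Config {
	return aws.Config{
		Region:                      region,
		DisableRequestCompression:   false, // leave compression enabled (the documented default)
		RequestMinCompressSizeBytes: 10240, // documented default; must stay within 0 and 10485760
	}
}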

View file

@ -3,4 +3,4 @@
package aws
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.23.0"
const goModuleVersion = "1.24.0"

View file

@ -0,0 +1,319 @@
// Package metrics implements metrics gathering for SDK development purposes.
//
// This package is designated as private and is intended for use only by the
// AWS client runtime. The exported API therein is not considered stable and
// is subject to breaking changes without notice.
package metrics
import (
"context"
"encoding/json"
"fmt"
"sync"
"time"
"github.com/aws/smithy-go/middleware"
)
const (
// ServiceIDKey is the key for the service ID metric.
ServiceIDKey = "ServiceId"
// OperationNameKey is the key for the operation name metric.
OperationNameKey = "OperationName"
// ClientRequestIDKey is the key for the client request ID metric.
ClientRequestIDKey = "ClientRequestId"
// APICallDurationKey is the key for the API call duration metric.
APICallDurationKey = "ApiCallDuration"
// APICallSuccessfulKey is the key for the API call successful metric.
APICallSuccessfulKey = "ApiCallSuccessful"
// MarshallingDurationKey is the key for the marshalling duration metric.
MarshallingDurationKey = "MarshallingDuration"
// InThroughputKey is the key for the input throughput metric.
InThroughputKey = "InThroughput"
// OutThroughputKey is the key for the output throughput metric.
OutThroughputKey = "OutThroughput"
// RetryCountKey is the key for the retry count metric.
RetryCountKey = "RetryCount"
// HTTPStatusCodeKey is the key for the HTTP status code metric.
HTTPStatusCodeKey = "HttpStatusCode"
// AWSExtendedRequestIDKey is the key for the AWS extended request ID metric.
AWSExtendedRequestIDKey = "AwsExtendedRequestId"
// AWSRequestIDKey is the key for the AWS request ID metric.
AWSRequestIDKey = "AwsRequestId"
// BackoffDelayDurationKey is the key for the backoff delay duration metric.
BackoffDelayDurationKey = "BackoffDelayDuration"
// StreamThroughputKey is the key for the stream throughput metric.
StreamThroughputKey = "Throughput"
// ConcurrencyAcquireDurationKey is the key for the concurrency acquire duration metric.
ConcurrencyAcquireDurationKey = "ConcurrencyAcquireDuration"
// PendingConcurrencyAcquiresKey is the key for the pending concurrency acquires metric.
PendingConcurrencyAcquiresKey = "PendingConcurrencyAcquires"
// SigningDurationKey is the key for the signing duration metric.
SigningDurationKey = "SigningDuration"
// UnmarshallingDurationKey is the key for the unmarshalling duration metric.
UnmarshallingDurationKey = "UnmarshallingDuration"
// TimeToFirstByteKey is the key for the time to first byte metric.
TimeToFirstByteKey = "TimeToFirstByte"
// ServiceCallDurationKey is the key for the service call duration metric.
ServiceCallDurationKey = "ServiceCallDuration"
// EndpointResolutionDurationKey is the key for the endpoint resolution duration metric.
EndpointResolutionDurationKey = "EndpointResolutionDuration"
// AttemptNumberKey is the key for the attempt number metric.
AttemptNumberKey = "AttemptNumber"
// MaxConcurrencyKey is the key for the max concurrency metric.
MaxConcurrencyKey = "MaxConcurrency"
// AvailableConcurrencyKey is the key for the available concurrency metric.
AvailableConcurrencyKey = "AvailableConcurrency"
)
// MetricPublisher provides the interface to provide custom MetricPublishers.
// PostRequestMetrics will be invoked by the MetricCollection middleware to post request.
// PostStreamMetrics will be invoked by ReadCloserWithMetrics to post stream metrics.
type MetricPublisher interface {
PostRequestMetrics(*MetricData) error
PostStreamMetrics(*MetricData) error
}
// Serializer provides the interface to provide custom Serializers.
// Serialize will transform any input object in its corresponding string representation.
type Serializer interface {
Serialize(obj interface{}) (string, error)
}
// DefaultSerializer is an implementation of the Serializer interface.
type DefaultSerializer struct{}
// Serialize uses the default JSON serializer to obtain the string representation of an object.
func (DefaultSerializer) Serialize(obj interface{}) (string, error) {
bytes, err := json.Marshal(obj)
if err != nil {
return "", err
}
return string(bytes), nil
}
type metricContextKey struct{}
// MetricContext contains fields to store metric-related information.
type MetricContext struct {
connectionCounter *SharedConnectionCounter
publisher MetricPublisher
data *MetricData
}
// MetricData stores the collected metric data.
type MetricData struct {
RequestStartTime time.Time
RequestEndTime time.Time
APICallDuration time.Duration
SerializeStartTime time.Time
SerializeEndTime time.Time
MarshallingDuration time.Duration
ResolveEndpointStartTime time.Time
ResolveEndpointEndTime time.Time
EndpointResolutionDuration time.Duration
InThroughput float64
OutThroughput float64
RetryCount int
Success uint8
StatusCode int
ClientRequestID string
ServiceID string
OperationName string
PartitionID string
Region string
RequestContentLength int64
Stream StreamMetrics
Attempts []AttemptMetrics
}
// StreamMetrics stores metrics related to streaming data.
type StreamMetrics struct {
ReadDuration time.Duration
ReadBytes int64
Throughput float64
}
// AttemptMetrics stores metrics related to individual attempts.
type AttemptMetrics struct {
ServiceCallStart time.Time
ServiceCallEnd time.Time
ServiceCallDuration time.Duration
FirstByteTime time.Time
TimeToFirstByte time.Duration
ConnRequestedTime time.Time
ConnObtainedTime time.Time
ConcurrencyAcquireDuration time.Duration
CredentialFetchStartTime time.Time
CredentialFetchEndTime time.Time
SignStartTime time.Time
SignEndTime time.Time
SigningDuration time.Duration
DeserializeStartTime time.Time
DeserializeEndTime time.Time
UnMarshallingDuration time.Duration
RetryDelay time.Duration
ResponseContentLength int64
StatusCode int
RequestID string
ExtendedRequestID string
HTTPClient string
MaxConcurrency int
PendingConnectionAcquires int
AvailableConcurrency int
ActiveRequests int
ReusedConnection bool
}
// Data returns the MetricData associated with the MetricContext.
func (mc *MetricContext) Data() *MetricData {
return mc.data
}
// ConnectionCounter returns the SharedConnectionCounter associated with the MetricContext.
func (mc *MetricContext) ConnectionCounter() *SharedConnectionCounter {
return mc.connectionCounter
}
// Publisher returns the MetricPublisher associated with the MetricContext.
func (mc *MetricContext) Publisher() MetricPublisher {
return mc.publisher
}
// ComputeRequestMetrics calculates and populates derived metrics based on the collected data.
func (md *MetricData) ComputeRequestMetrics() {
for idx := range md.Attempts {
attempt := &md.Attempts[idx]
attempt.ConcurrencyAcquireDuration = attempt.ConnObtainedTime.Sub(attempt.ConnRequestedTime)
attempt.SigningDuration = attempt.SignEndTime.Sub(attempt.SignStartTime)
attempt.UnMarshallingDuration = attempt.DeserializeEndTime.Sub(attempt.DeserializeStartTime)
attempt.TimeToFirstByte = attempt.FirstByteTime.Sub(attempt.ServiceCallStart)
attempt.ServiceCallDuration = attempt.ServiceCallEnd.Sub(attempt.ServiceCallStart)
}
md.APICallDuration = md.RequestEndTime.Sub(md.RequestStartTime)
md.MarshallingDuration = md.SerializeEndTime.Sub(md.SerializeStartTime)
md.EndpointResolutionDuration = md.ResolveEndpointEndTime.Sub(md.ResolveEndpointStartTime)
md.RetryCount = len(md.Attempts) - 1
latestAttempt, err := md.LatestAttempt()
if err != nil {
fmt.Printf("error retrieving attempts data due to: %s. Skipping Throughput metrics", err.Error())
} else {
md.StatusCode = latestAttempt.StatusCode
if md.Success == 1 {
if latestAttempt.ResponseContentLength > 0 && latestAttempt.ServiceCallDuration > 0 {
md.InThroughput = float64(latestAttempt.ResponseContentLength) / latestAttempt.ServiceCallDuration.Seconds()
}
if md.RequestContentLength > 0 && latestAttempt.ServiceCallDuration > 0 {
md.OutThroughput = float64(md.RequestContentLength) / latestAttempt.ServiceCallDuration.Seconds()
}
}
}
}
// LatestAttempt returns the latest attempt metrics.
// It returns an error if no attempts are initialized.
func (md *MetricData) LatestAttempt() (*AttemptMetrics, error) {
if md.Attempts == nil || len(md.Attempts) == 0 {
return nil, fmt.Errorf("no attempts initialized. NewAttempt() should be called first")
}
return &md.Attempts[len(md.Attempts)-1], nil
}
// NewAttempt initializes new attempt metrics.
func (md *MetricData) NewAttempt() {
if md.Attempts == nil {
md.Attempts = []AttemptMetrics{}
}
md.Attempts = append(md.Attempts, AttemptMetrics{})
}
// SharedConnectionCounter is a counter shared across API calls.
type SharedConnectionCounter struct {
mu sync.Mutex
activeRequests int
pendingConnectionAcquire int
}
// ActiveRequests returns the count of active requests.
func (cc *SharedConnectionCounter) ActiveRequests() int {
cc.mu.Lock()
defer cc.mu.Unlock()
return cc.activeRequests
}
// PendingConnectionAcquire returns the count of pending connection acquires.
func (cc *SharedConnectionCounter) PendingConnectionAcquire() int {
cc.mu.Lock()
defer cc.mu.Unlock()
return cc.pendingConnectionAcquire
}
// AddActiveRequest increments the count of active requests.
func (cc *SharedConnectionCounter) AddActiveRequest() {
cc.mu.Lock()
defer cc.mu.Unlock()
cc.activeRequests++
}
// RemoveActiveRequest decrements the count of active requests.
func (cc *SharedConnectionCounter) RemoveActiveRequest() {
cc.mu.Lock()
defer cc.mu.Unlock()
cc.activeRequests--
}
// AddPendingConnectionAcquire increments the count of pending connection acquires.
func (cc *SharedConnectionCounter) AddPendingConnectionAcquire() {
cc.mu.Lock()
defer cc.mu.Unlock()
cc.pendingConnectionAcquire++
}
// RemovePendingConnectionAcquire decrements the count of pending connection acquires.
func (cc *SharedConnectionCounter) RemovePendingConnectionAcquire() {
cc.mu.Lock()
defer cc.mu.Unlock()
cc.pendingConnectionAcquire--
}
// InitMetricContext initializes the metric context with the provided counter and publisher.
// It returns the updated context.
func InitMetricContext(
ctx context.Context, counter *SharedConnectionCounter, publisher MetricPublisher,
) context.Context {
if middleware.GetStackValue(ctx, metricContextKey{}) == nil {
ctx = middleware.WithStackValue(ctx, metricContextKey{}, &MetricContext{
connectionCounter: counter,
publisher: publisher,
data: &MetricData{
Attempts: []AttemptMetrics{},
Stream: StreamMetrics{},
},
})
}
return ctx
}
// Context returns the metric context from the given context.
// It returns nil if the metric context is not found.
func Context(ctx context.Context) *MetricContext {
mctx := middleware.GetStackValue(ctx, metricContextKey{})
if mctx == nil {
return nil
}
return mctx.(*MetricContext)
}
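
The derived-value computation above is easy to exercise in isolation. Below is a minimal sketch that drives MetricData directly, outside the SDK's middleware stack; the private/metrics package is internal plumbing, so this is illustrative only and the timestamps are made up.

package main

import (
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics"
)

func main() {
	start := time.Now()

	md := metrics.MetricData{
		RequestStartTime:     start,
		RequestEndTime:       start.Add(300 * time.Millisecond),
		Success:              1,
		RequestContentLength: 2048,
	}

	// Record one attempt; ComputeRequestMetrics derives durations and
	// throughput from the raw timestamps and byte counts.
	md.NewAttempt()
	attempt, _ := md.LatestAttempt()
	attempt.ServiceCallStart = start
	attempt.ServiceCallEnd = start.Add(250 * time.Millisecond)
	attempt.ResponseContentLength = 1 << 20 // 1 MiB

	md.ComputeRequestMetrics()

	fmt.Println(md.APICallDuration) // 300ms
	fmt.Println(md.RetryCount)      // 0 (len(Attempts) - 1)
	fmt.Println(md.InThroughput)    // 4.194304e+06 bytes/s (1 MiB / 0.25s)
	fmt.Println(md.OutThroughput)   // 8192 bytes/s (2048 / 0.25s)
}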

View file

@ -1,3 +1,15 @@
# v1.5.4 (2023-12-07)
* No change notes available for this release.
# v1.5.3 (2023-11-30)
* No change notes available for this release.
# v1.5.2 (2023-11-29)
* No change notes available for this release.
# v1.5.1 (2023-11-15)
* No change notes available for this release.

View file

@ -3,4 +3,4 @@
package eventstream
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.5.1"
const goModuleVersion = "1.5.4"

View file

@ -3,6 +3,7 @@ package retry
import (
"context"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics"
"strconv"
"strings"
"time"
@ -225,6 +226,13 @@ func (r *Attempt) handleAttempt(
// that time. Potentially exit early if the sleep is canceled via the
// context.
retryDelay, reqErr := r.retryer.RetryDelay(attemptNum, err)
mctx := metrics.Context(ctx)
if mctx != nil {
attempt, err := mctx.Data().LatestAttempt()
if err == nil {
attempt.RetryDelay = retryDelay
}
}
if reqErr != nil {
return out, attemptResult, releaseRetryToken, reqErr
}

View file

@ -11,6 +11,7 @@ import (
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/middleware/private/metrics"
v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4"
internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/internal/sdk"
@ -237,21 +238,32 @@ func (m *contentSHA256Header) HandleFinalize(
return next.HandleFinalize(ctx, in)
}
// SignHTTPRequestMiddlewareOptions is the configuration options for the SignHTTPRequestMiddleware middleware.
// SignHTTPRequestMiddlewareOptions is the configuration options for
// [SignHTTPRequestMiddleware].
//
// Deprecated: [SignHTTPRequestMiddleware] is deprecated.
type SignHTTPRequestMiddlewareOptions struct {
CredentialsProvider aws.CredentialsProvider
Signer HTTPSigner
LogSigning bool
}
// SignHTTPRequestMiddleware is a `FinalizeMiddleware` implementation for SigV4 HTTP Signing
// SignHTTPRequestMiddleware is a `FinalizeMiddleware` implementation for SigV4
// HTTP Signing.
//
// Deprecated: AWS service clients no longer use this middleware. Signing as an
// SDK operation is now performed through an internal per-service middleware
// which opaquely selects and uses the signer from the resolved auth scheme.
type SignHTTPRequestMiddleware struct {
credentialsProvider aws.CredentialsProvider
signer HTTPSigner
logSigning bool
}
// NewSignHTTPRequestMiddleware constructs a SignHTTPRequestMiddleware using the given Signer for signing requests
// NewSignHTTPRequestMiddleware constructs a [SignHTTPRequestMiddleware] using
// the given [Signer] for signing requests.
//
// Deprecated: SignHTTPRequestMiddleware is deprecated.
func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware {
return &SignHTTPRequestMiddleware{
credentialsProvider: options.CredentialsProvider,
@ -260,12 +272,17 @@ func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *Sig
}
}
// ID is the SignHTTPRequestMiddleware identifier
// ID is the SignHTTPRequestMiddleware identifier.
//
// Deprecated: SignHTTPRequestMiddleware is deprecated.
func (s *SignHTTPRequestMiddleware) ID() string {
return "Signing"
}
// HandleFinalize will take the provided input and sign the request using the SigV4 authentication scheme
// HandleFinalize will take the provided input and sign the request using the
// SigV4 authentication scheme.
//
// Deprecated: SignHTTPRequestMiddleware is deprecated.
func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
) {
@ -284,7 +301,22 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl
return out, metadata, &SigningError{Err: fmt.Errorf("computed payload hash missing from context")}
}
mctx := metrics.Context(ctx)
if mctx != nil {
if attempt, err := mctx.Data().LatestAttempt(); err == nil {
attempt.CredentialFetchStartTime = sdk.NowTime()
}
}
credentials, err := s.credentialsProvider.Retrieve(ctx)
if mctx != nil {
if attempt, err := mctx.Data().LatestAttempt(); err == nil {
attempt.CredentialFetchEndTime = sdk.NowTime()
}
}
if err != nil {
return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)}
}
@ -305,7 +337,20 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl
})
}
if mctx != nil {
if attempt, err := mctx.Data().LatestAttempt(); err == nil {
attempt.SignStartTime = sdk.NowTime()
}
}
err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, signingRegion, sdk.NowTime(), signerOptions...)
if mctx != nil {
if attempt, err := mctx.Data().LatestAttempt(); err == nil {
attempt.SignEndTime = sdk.NowTime()
}
}
if err != nil {
return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)}
}

View file

@ -68,6 +68,9 @@ import (
const (
signingAlgorithm = "AWS4-HMAC-SHA256"
authorizationHeader = "Authorization"
// Version of signing v4
Version = "SigV4"
)
// HTTPSigner is an interface to a SigV4 signer that can sign HTTP requests
@ -103,6 +106,11 @@ type SignerOptions struct {
// This will enable logging of the canonical request, the string to sign, and for presigning the subsequent
// presigned URL.
LogSigning bool
// Disables setting the session token on the request as part of signing
// through X-Amz-Security-Token. This is needed for variations of v4 that
// present the token elsewhere.
DisableSessionToken bool
}
// Signer applies AWS v4 signing to given request. Use this to sign requests
@ -136,6 +144,7 @@ type httpSigner struct {
DisableHeaderHoisting bool
DisableURIPathEscaping bool
DisableSessionToken bool
}
func (s *httpSigner) Build() (signedRequest, error) {
@ -284,6 +293,7 @@ func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *ht
Time: v4Internal.NewSigningTime(signingTime.UTC()),
DisableHeaderHoisting: options.DisableHeaderHoisting,
DisableURIPathEscaping: options.DisableURIPathEscaping,
DisableSessionToken: options.DisableSessionToken,
KeyDerivator: s.keyDerivator,
}
@ -360,6 +370,7 @@ func (s *Signer) PresignHTTP(
IsPreSign: true,
DisableHeaderHoisting: options.DisableHeaderHoisting,
DisableURIPathEscaping: options.DisableURIPathEscaping,
DisableSessionToken: options.DisableSessionToken,
KeyDerivator: s.keyDerivator,
}
@ -502,7 +513,8 @@ func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Val
if s.IsPreSign {
query.Set(v4Internal.AmzAlgorithmKey, signingAlgorithm)
if sessionToken := s.Credentials.SessionToken; len(sessionToken) > 0 {
sessionToken := s.Credentials.SessionToken
if !s.DisableSessionToken && len(sessionToken) > 0 {
query.Set("X-Amz-Security-Token", sessionToken)
}
@ -512,7 +524,7 @@ func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Val
headers[v4Internal.AmzDateKey] = append(headers[v4Internal.AmzDateKey][:0], amzDate)
if len(s.Credentials.SessionToken) > 0 {
if !s.DisableSessionToken && len(s.Credentials.SessionToken) > 0 {
headers[v4Internal.AmzSecurityTokenKey] = append(headers[v4Internal.AmzSecurityTokenKey][:0], s.Credentials.SessionToken)
}
}
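
For context on the new DisableSessionToken knob: a hedged sketch of signing a request with it enabled, using the public v4.NewSigner constructor and placeholder credentials. With the option set, the session token is left off both the X-Amz-Security-Token header and the presign query parameter, so a caller targeting a SigV4 variant must convey the token elsewhere.

package main

import (
	"context"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
)

func main() {
	creds := aws.Credentials{
		AccessKeyID:     "AKID",
		SecretAccessKey: "SECRET",
		SessionToken:    "SESSION-TOKEN",
	}

	req, _ := http.NewRequest(http.MethodGet,
		"https://examplebucket.s3.us-east-1.amazonaws.com/key", nil)

	// Hex-encoded SHA-256 of the (empty) payload, as required by SigV4.
	sum := sha256.Sum256(nil)
	payloadHash := hex.EncodeToString(sum[:])

	signer := v4.NewSigner()
	err := signer.SignHTTP(context.TODO(), creds, req, payloadHash,
		"s3", "us-east-1", time.Now(), func(o *v4.SignerOptions) {
			o.DisableSessionToken = true // new option in this revision
		})
	if err != nil {
		panic(err)
	}

	fmt.Println(req.Header.Get("Authorization") != "")        // true
	fmt.Println(req.Header.Get("X-Amz-Security-Token") == "") // true: token omitted
}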

View file

@ -1,3 +1,57 @@
# v1.26.1 (2023-12-08)
* **Bug Fix**: Correct loading of [services *] sections into shared config.
* **Dependency Update**: Updated to the latest SDK module versions
# v1.26.0 (2023-12-07)
* **Feature**: Support modeled request compression. The only algorithm supported at this time is `gzip`.
* **Dependency Update**: Updated to the latest SDK module versions
# v1.25.12 (2023-12-06)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.25.11 (2023-12-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.25.10 (2023-11-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.25.9 (2023-11-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.25.8 (2023-11-28.3)
* **Bug Fix**: Correct resolution of S3Express auth disable toggle.
# v1.25.7 (2023-11-28.2)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.25.6 (2023-11-28)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.25.5 (2023-11-21)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.25.4 (2023-11-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.25.3 (2023-11-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.25.2 (2023-11-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.25.1 (2023-11-15)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -70,10 +70,16 @@ var defaultAWSConfigResolvers = []awsConfigResolver{
// httpBearerAuth authentication scheme.
resolveBearerAuthToken,
// Sets the sdk app ID if present in shared config profile
// Sets the sdk app ID if present in env var or shared config profile
resolveAppID,
resolveBaseEndpoint,
// Sets the DisableRequestCompression if present in env var or shared config profile
resolveDisableRequestCompression,
// Sets the RequestMinCompressSizeBytes if present in env var or shared config profile
resolveRequestMinCompressSizeBytes,
}
// A Config represents a generic configuration value or set of values. This type

View file

@ -12,6 +12,7 @@ import (
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
smithyrequestcompression "github.com/aws/smithy-go/private/requestcompression"
)
// CredentialsSourceName provides a name of the provider when config is
@ -74,6 +75,11 @@ const (
awsIgnoreConfiguredEndpoints = "AWS_IGNORE_CONFIGURED_ENDPOINT_URLS"
awsEndpointURL = "AWS_ENDPOINT_URL"
awsDisableRequestCompression = "AWS_DISABLE_REQUEST_COMPRESSION"
awsRequestMinCompressionSizeBytes = "AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES"
awsS3DisableExpressSessionAuthEnv = "AWS_S3_DISABLE_EXPRESS_SESSION_AUTH"
)
var (
@ -268,6 +274,22 @@ type EnvConfig struct {
// Value to contain configured endpoints to be propagated to
// corresponding endpoint resolution field.
BaseEndpoint string
// determine if request compression is allowed, default to false
// retrieved from env var AWS_DISABLE_REQUEST_COMPRESSION
DisableRequestCompression *bool
// inclusive threshold request body size to trigger compression,
// default to 10240 and must be within 0 and 10485760 bytes inclusive
// retrieved from env var AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES
RequestMinCompressSizeBytes *int64
// Whether S3Express auth is disabled.
//
// This will NOT prevent requests from being made to S3Express buckets, it
// will only bypass the modified endpoint routing and signing behaviors
// associated with the feature.
S3DisableExpressAuth *bool
}
// loadEnvConfig reads configuration values from the OS's environment variables.
@ -310,6 +332,13 @@ func NewEnvConfig() (EnvConfig, error) {
cfg.AppID = os.Getenv(awsSdkAppID)
if err := setBoolPtrFromEnvVal(&cfg.DisableRequestCompression, []string{awsDisableRequestCompression}); err != nil {
return cfg, err
}
if err := setInt64PtrFromEnvVal(&cfg.RequestMinCompressSizeBytes, []string{awsRequestMinCompressionSizeBytes}, smithyrequestcompression.MaxRequestMinCompressSizeBytes); err != nil {
return cfg, err
}
if err := setEndpointDiscoveryTypeFromEnvVal(&cfg.EnableEndpointDiscovery, []string{awsEnableEndpointDiscoveryEnvVar}); err != nil {
return cfg, err
}
@ -356,6 +385,10 @@ func NewEnvConfig() (EnvConfig, error) {
return cfg, err
}
if err := setBoolPtrFromEnvVal(&cfg.S3DisableExpressAuth, []string{awsS3DisableExpressSessionAuthEnv}); err != nil {
return cfg, err
}
return cfg, nil
}
@ -370,6 +403,20 @@ func (c EnvConfig) getAppID(context.Context) (string, bool, error) {
return c.AppID, len(c.AppID) > 0, nil
}
func (c EnvConfig) getDisableRequestCompression(context.Context) (bool, bool, error) {
if c.DisableRequestCompression == nil {
return false, false, nil
}
return *c.DisableRequestCompression, true, nil
}
func (c EnvConfig) getRequestMinCompressSizeBytes(context.Context) (int64, bool, error) {
if c.RequestMinCompressSizeBytes == nil {
return 0, false, nil
}
return *c.RequestMinCompressSizeBytes, true, nil
}
// GetRetryMaxAttempts returns the value of AWS_MAX_ATTEMPTS if was specified,
// and not 0.
func (c EnvConfig) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) {
@ -626,6 +673,30 @@ func setBoolPtrFromEnvVal(dst **bool, keys []string) error {
return nil
}
func setInt64PtrFromEnvVal(dst **int64, keys []string, max int64) error {
for _, k := range keys {
value := os.Getenv(k)
if len(value) == 0 {
continue
}
v, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return fmt.Errorf("invalid value for env var, %s=%s, need int64", k, value)
} else if v < 0 || v > max {
return fmt.Errorf("invalid range for env var min request compression size bytes %q, must be within 0 and 10485760 inclusively", v)
}
if *dst == nil {
*dst = new(int64)
}
**dst = v
break
}
return nil
}
func setEndpointDiscoveryTypeFromEnvVal(dst *aws.EndpointDiscoveryEnableState, keys []string) error {
for _, k := range keys {
value := os.Getenv(k)
@ -736,3 +807,13 @@ func (c EnvConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) {
return *c.EC2IMDSv1Disabled, true
}
// GetS3DisableExpressAuth returns the configured value for
// [EnvConfig.S3DisableExpressAuth].
func (c EnvConfig) GetS3DisableExpressAuth() (value, ok bool) {
if c.S3DisableExpressAuth == nil {
return false, false
}
return *c.S3DisableExpressAuth, true
}
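
A small sketch of how the new variables surface through NewEnvConfig; the values are illustrative and the rest of the environment is assumed to be clean.

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	// The three settings added in this revision.
	os.Setenv("AWS_DISABLE_REQUEST_COMPRESSION", "true")
	os.Setenv("AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES", "20480")
	os.Setenv("AWS_S3_DISABLE_EXPRESS_SESSION_AUTH", "true")

	cfg, err := config.NewEnvConfig()
	if err != nil {
		panic(err)
	}

	fmt.Println(*cfg.DisableRequestCompression)   // true
	fmt.Println(*cfg.RequestMinCompressSizeBytes) // 20480
	fmt.Println(*cfg.S3DisableExpressAuth)        // true
}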

View file

@ -3,4 +3,4 @@
package config
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.25.1"
const goModuleVersion = "1.26.1"

View file

@ -206,6 +206,15 @@ type LoadOptions struct {
// The sdk app ID retrieved from env var or shared config to be added to request user agent header
AppID string
// Specifies whether an operation request could be compressed
DisableRequestCompression *bool
// The inclusive min bytes of a request body that could be compressed
RequestMinCompressSizeBytes *int64
// Whether S3 Express auth is disabled.
S3DisableExpressAuth *bool
}
func (o LoadOptions) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) {
@ -253,6 +262,22 @@ func (o LoadOptions) getAppID(ctx context.Context) (string, bool, error) {
return o.AppID, len(o.AppID) > 0, nil
}
// getDisableRequestCompression returns DisableRequestCompression from config's LoadOptions
func (o LoadOptions) getDisableRequestCompression(ctx context.Context) (bool, bool, error) {
if o.DisableRequestCompression == nil {
return false, false, nil
}
return *o.DisableRequestCompression, true, nil
}
// getRequestMinCompressSizeBytes returns RequestMinCompressSizeBytes from config's LoadOptions
func (o LoadOptions) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) {
if o.RequestMinCompressSizeBytes == nil {
return 0, false, nil
}
return *o.RequestMinCompressSizeBytes, true, nil
}
// WithRegion is a helper function to construct functional options
// that sets Region on config's LoadOptions. Setting the region to
// an empty string, will result in the region value being ignored.
@ -274,6 +299,30 @@ func WithAppID(ID string) LoadOptionsFunc {
}
}
// WithDisableRequestCompression is a helper function to construct functional options
// that sets DisableRequestCompression on config's LoadOptions.
func WithDisableRequestCompression(DisableRequestCompression *bool) LoadOptionsFunc {
return func(o *LoadOptions) error {
if DisableRequestCompression == nil {
return nil
}
o.DisableRequestCompression = DisableRequestCompression
return nil
}
}
// WithRequestMinCompressSizeBytes is a helper function to construct functional options
// that sets RequestMinCompressSizeBytes on config's LoadOptions.
func WithRequestMinCompressSizeBytes(RequestMinCompressSizeBytes *int64) LoadOptionsFunc {
return func(o *LoadOptions) error {
if RequestMinCompressSizeBytes == nil {
return nil
}
o.RequestMinCompressSizeBytes = RequestMinCompressSizeBytes
return nil
}
}
// getDefaultRegion returns DefaultRegion from config's LoadOptions
func (o LoadOptions) getDefaultRegion(ctx context.Context) (string, bool, error) {
if len(o.DefaultRegion) == 0 {
@ -1044,3 +1093,22 @@ func WithDefaultsMode(mode aws.DefaultsMode, optFns ...func(options *DefaultsMod
return nil
}
}
// GetS3DisableExpressAuth returns the configured value for
// [EnvConfig.S3DisableExpressAuth].
func (o LoadOptions) GetS3DisableExpressAuth() (value, ok bool) {
if o.S3DisableExpressAuth == nil {
return false, false
}
return *o.S3DisableExpressAuth, true
}
// WithS3DisableExpressAuth sets [LoadOptions.S3DisableExpressAuth]
// to the value provided.
func WithS3DisableExpressAuth(v bool) LoadOptionsFunc {
return func(o *LoadOptions) error {
o.S3DisableExpressAuth = &v
return nil
}
}
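
The same settings can be supplied programmatically via the new functional options; a hedged sketch (region and values are placeholders, and in practice you would pick either the disable switch or a threshold, not both):

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRegion("us-east-1"),
		// Turn modeled request compression off entirely...
		config.WithDisableRequestCompression(aws.Bool(true)),
		// ...or leave it on and raise the minimum body size (default 10240 bytes).
		config.WithRequestMinCompressSizeBytes(aws.Int64(64*1024)),
		// Bypass S3Express-specific endpoint routing and signing.
		config.WithS3DisableExpressAuth(true),
	)
	if err != nil {
		log.Fatal(err)
	}

	_ = cfg // hand cfg to s3.NewFromConfig, etc.
}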

View file

@ -191,6 +191,40 @@ func getAppID(ctx context.Context, configs configs) (value string, found bool, e
return
}
// disableRequestCompressionProvider provides access to the DisableRequestCompression
type disableRequestCompressionProvider interface {
getDisableRequestCompression(context.Context) (bool, bool, error)
}
func getDisableRequestCompression(ctx context.Context, configs configs) (value bool, found bool, err error) {
for _, cfg := range configs {
if p, ok := cfg.(disableRequestCompressionProvider); ok {
value, found, err = p.getDisableRequestCompression(ctx)
if err != nil || found {
break
}
}
}
return
}
// requestMinCompressSizeBytesProvider provides access to the MinCompressSizeBytes
type requestMinCompressSizeBytesProvider interface {
getRequestMinCompressSizeBytes(context.Context) (int64, bool, error)
}
func getRequestMinCompressSizeBytes(ctx context.Context, configs configs) (value int64, found bool, err error) {
for _, cfg := range configs {
if p, ok := cfg.(requestMinCompressSizeBytesProvider); ok {
value, found, err = p.getRequestMinCompressSizeBytes(ctx)
if err != nil || found {
break
}
}
}
return
}
// ec2IMDSRegionProvider provides access to the ec2 imds region
// configuration value
type ec2IMDSRegionProvider interface {

View file

@ -139,6 +139,33 @@ func resolveAppID(ctx context.Context, cfg *aws.Config, configs configs) error {
return nil
}
// resolveDisableRequestCompression extracts the DisableRequestCompression from the configs slice's
// SharedConfig or EnvConfig
func resolveDisableRequestCompression(ctx context.Context, cfg *aws.Config, configs configs) error {
disable, _, err := getDisableRequestCompression(ctx, configs)
if err != nil {
return err
}
cfg.DisableRequestCompression = disable
return nil
}
// resolveRequestMinCompressSizeBytes extracts the RequestMinCompressSizeBytes from the configs slice's
// SharedConfig or EnvConfig
func resolveRequestMinCompressSizeBytes(ctx context.Context, cfg *aws.Config, configs configs) error {
minBytes, found, err := getRequestMinCompressSizeBytes(ctx, configs)
if err != nil {
return err
}
// must set a default min size of 10240 bytes if not configured
if !found {
minBytes = 10240
}
cfg.RequestMinCompressSizeBytes = minBytes
return nil
}
// resolveDefaultRegion extracts the first instance of a default region and sets `aws.Config.Region` to the default
// region if region had not been resolved from other sources.
func resolveDefaultRegion(ctx context.Context, cfg *aws.Config, configs configs) error {

View file

@ -17,6 +17,7 @@ import (
"github.com/aws/aws-sdk-go-v2/internal/ini"
"github.com/aws/aws-sdk-go-v2/internal/shareddefaults"
"github.com/aws/smithy-go/logging"
smithyrequestcompression "github.com/aws/smithy-go/private/requestcompression"
)
const (
@ -30,7 +31,7 @@ const (
// Prefix for services section. It is referenced in profile via the services
// parameter to configure clients for service-specific parameters.
servicesPrefix = `services`
servicesPrefix = `services `
// string equivalent for boolean
endpointDiscoveryDisabled = `false`
@ -107,6 +108,13 @@ const (
ignoreConfiguredEndpoints = "ignore_configured_endpoint_urls"
endpointURL = "endpoint_url"
servicesSectionKey = "services"
disableRequestCompression = "disable_request_compression"
requestMinCompressionSizeBytes = "request_min_compression_size_bytes"
s3DisableExpressSessionAuthKey = "s3_disable_express_session_auth"
)
// defaultSharedConfigProfile allows for swapping the default profile for testing
@ -314,8 +322,25 @@ type SharedConfig struct {
// corresponding endpoint resolution field.
BaseEndpoint string
// Value to contain services section content.
Services Services
// Services section config.
ServicesSectionName string
Services Services
// determine if request compression is allowed, default to false
// retrieved from config file's profile field disable_request_compression
DisableRequestCompression *bool
// inclusive threshold request body size to trigger compression,
// default to 10240 and must be within 0 and 10485760 bytes inclusive
// retrieved from config file's profile field request_min_compression_size_bytes
RequestMinCompressSizeBytes *int64
// Whether S3Express auth is disabled.
//
// This will NOT prevent requests from being made to S3Express buckets, it
// will only bypass the modified endpoint routing and signing behaviors
// associated with the feature.
S3DisableExpressAuth *bool
}
func (c SharedConfig) getDefaultsMode(ctx context.Context) (value aws.DefaultsMode, ok bool, err error) {
@ -435,6 +460,16 @@ func (c SharedConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEnd
return c.UseFIPSEndpoint, true, nil
}
// GetS3DisableExpressAuth returns the configured value for
// [SharedConfig.S3DisableExpressAuth].
func (c SharedConfig) GetS3DisableExpressAuth() (value, ok bool) {
if c.S3DisableExpressAuth == nil {
return false, false
}
return *c.S3DisableExpressAuth, true
}
// GetCustomCABundle returns the custom CA bundle's PEM bytes if the file was
func (c SharedConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) {
if len(c.CustomCABundle) == 0 {
@ -975,14 +1010,11 @@ func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile
c.SSOSession = &ssoSession
}
for _, sectionName := range sections.List() {
if strings.HasPrefix(sectionName, servicesPrefix) {
section, ok := sections.GetSection(sectionName)
if ok {
var svcs Services
svcs.setFromIniSection(section)
c.Services = svcs
}
if len(c.ServicesSectionName) > 0 {
if section, ok := sections.GetSection(servicesPrefix + c.ServicesSectionName); ok {
var svcs Services
svcs.setFromIniSection(section)
c.Services = svcs
}
}
@ -1054,6 +1086,7 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er
updateEndpointDiscoveryType(&c.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey)
updateBoolPtr(&c.S3UseARNRegion, section, s3UseARNRegionKey)
updateBoolPtr(&c.S3DisableMultiRegionAccessPoints, section, s3DisableMultiRegionAccessPointsKey)
updateBoolPtr(&c.S3DisableExpressAuth, section, s3DisableExpressSessionAuthKey)
if err := updateEC2MetadataServiceEndpointMode(&c.EC2IMDSEndpointMode, section, ec2MetadataServiceEndpointModeKey); err != nil {
return fmt.Errorf("failed to load %s from shared config, %v", ec2MetadataServiceEndpointModeKey, err)
@ -1084,6 +1117,13 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er
updateString(&c.BaseEndpoint, section, endpointURL)
if err := updateDisableRequestCompression(&c.DisableRequestCompression, section, disableRequestCompression); err != nil {
return fmt.Errorf("failed to load %s from shared config, %w", disableRequestCompression, err)
}
if err := updateRequestMinCompressSizeBytes(&c.RequestMinCompressSizeBytes, section, requestMinCompressionSizeBytes); err != nil {
return fmt.Errorf("failed to load %s from shared config, %w", requestMinCompressionSizeBytes, err)
}
// Shared Credentials
creds := aws.Credentials{
AccessKeyID: section.String(accessKeyIDKey),
@ -1096,9 +1136,61 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er
c.Credentials = creds
}
updateString(&c.ServicesSectionName, section, servicesSectionKey)
return nil
}
func updateRequestMinCompressSizeBytes(bytes **int64, sec ini.Section, key string) error {
if !sec.Has(key) {
return nil
}
v, ok := sec.Int(key)
if !ok {
return fmt.Errorf("invalid value for min request compression size bytes %s, need int64", sec.String(key))
}
if v < 0 || v > smithyrequestcompression.MaxRequestMinCompressSizeBytes {
return fmt.Errorf("invalid range for min request compression size bytes %d, must be within 0 and 10485760 inclusively", v)
}
*bytes = new(int64)
**bytes = v
return nil
}
func updateDisableRequestCompression(disable **bool, sec ini.Section, key string) error {
if !sec.Has(key) {
return nil
}
v := sec.String(key)
switch {
case v == "true":
*disable = new(bool)
**disable = true
case v == "false":
*disable = new(bool)
**disable = false
default:
return fmt.Errorf("invalid value for shared config profile field, %s=%s, need true or false", key, v)
}
return nil
}
func (c SharedConfig) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) {
if c.RequestMinCompressSizeBytes == nil {
return 0, false, nil
}
return *c.RequestMinCompressSizeBytes, true, nil
}
func (c SharedConfig) getDisableRequestCompression(ctx context.Context) (bool, bool, error) {
if c.DisableRequestCompression == nil {
return false, false, nil
}
return *c.DisableRequestCompression, true, nil
}
func updateDefaultsMode(mode *aws.DefaultsMode, section ini.Section, key string) error {
if !section.Has(key) {
return nil

View file

@ -1,3 +1,47 @@
# v1.16.12 (2023-12-08)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.11 (2023-12-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.10 (2023-12-06)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.9 (2023-12-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.8 (2023-11-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.7 (2023-11-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.6 (2023-11-28.2)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.5 (2023-11-28)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.4 (2023-11-21)
* **Bug Fix**: Don't expect error responses to have a JSON payload in the endpointcreds provider.
# v1.16.3 (2023-11-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.2 (2023-11-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.1 (2023-11-15)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -62,7 +62,16 @@ func New(options Options, optFns ...func(*Options)) *Client {
}
if options.Retryer == nil {
options.Retryer = retry.NewStandard()
// Amazon-owned implementations of this endpoint are known to sometimes
// return plaintext responses (i.e. no Code) under normal operation, so add
// a few additional retryable status codes
options.Retryer = retry.NewStandard(func(o *retry.StandardOptions) {
o.Retryables = append(o.Retryables, retry.RetryableHTTPStatusCode{
Codes: map[int]struct{}{
http.StatusTooManyRequests: {},
},
})
})
}
for _, fn := range optFns {
@ -122,9 +131,10 @@ type GetCredentialsOutput struct {
// EndpointError is an error returned from the endpoint service
type EndpointError struct {
Code string `json:"code"`
Message string `json:"message"`
Fault smithy.ErrorFault `json:"-"`
Code string `json:"code"`
Message string `json:"message"`
Fault smithy.ErrorFault `json:"-"`
statusCode int `json:"-"`
}
// Error is the error message string
@ -146,3 +156,8 @@ func (e *EndpointError) ErrorMessage() string {
func (e *EndpointError) ErrorFault() smithy.ErrorFault {
return e.Fault
}
// HTTPStatusCode implements retry.HTTPStatusCode.
func (e *EndpointError) HTTPStatusCode() int {
return e.statusCode
}

View file

@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"io"
"net/url"
"github.com/aws/smithy-go"
@ -104,17 +105,44 @@ func (d *deserializeOpGetCredential) HandleDeserialize(ctx context.Context, in s
}
func deserializeError(response *smithyhttp.Response) error {
var errShape *EndpointError
err := json.NewDecoder(response.Body).Decode(&errShape)
// we could be talking to anything, json isn't guaranteed
// see https://github.com/aws/aws-sdk-go-v2/issues/2316
if response.Header.Get("Content-Type") == "application/json" {
return deserializeJSONError(response)
}
msg, err := io.ReadAll(response.Body)
if err != nil {
return &smithy.DeserializationError{Err: fmt.Errorf("failed to decode error message, %w", err)}
return &smithy.DeserializationError{
Err: fmt.Errorf("read response, %w", err),
}
}
if response.StatusCode >= 500 {
errShape.Fault = smithy.FaultServer
} else {
errShape.Fault = smithy.FaultClient
return &EndpointError{
// no sensible value for Code
Message: string(msg),
Fault: stof(response.StatusCode),
statusCode: response.StatusCode,
}
}
func deserializeJSONError(response *smithyhttp.Response) error {
var errShape *EndpointError
if err := json.NewDecoder(response.Body).Decode(&errShape); err != nil {
return &smithy.DeserializationError{
Err: fmt.Errorf("failed to decode error message, %w", err),
}
}
errShape.Fault = stof(response.StatusCode)
errShape.statusCode = response.StatusCode
return errShape
}
// maps HTTP status code to smithy ErrorFault
func stof(code int) smithy.ErrorFault {
if code >= 500 {
return smithy.FaultServer
}
return smithy.FaultClient
}

View file

@ -3,4 +3,4 @@
package credentials
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.16.1"
const goModuleVersion = "1.16.12"

View file

@ -1,3 +1,27 @@
# v1.14.10 (2023-12-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.14.9 (2023-12-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.14.8 (2023-11-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.14.7 (2023-11-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.14.6 (2023-11-28.2)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.14.5 (2023-11-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.14.4 (2023-11-15)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package imds
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.14.4"
const goModuleVersion = "1.14.10"

View file

@ -1,3 +1,61 @@
# v1.15.7 (2023-12-08)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.6 (2023-12-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.5 (2023-12-06)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.4 (2023-12-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.3 (2023-11-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.2 (2023-11-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.1 (2023-11-28.3)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.15.0 (2023-11-28.2)
* **Feature**: Add S3Express support.
* **Dependency Update**: Updated to the latest SDK module versions
# v1.14.4 (2023-11-28)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.14.3 (2023-11-27)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.14.2 (2023-11-21)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.14.1 (2023-11-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.14.0 (2023-11-17)
* **Feature**: **BREAKING CHANGE** Correct nullability of a large number of S3 structure fields. See https://github.com/aws/aws-sdk-go-v2/issues/2162.
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.9 (2023-11-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.8 (2023-11-15)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -436,8 +436,8 @@ func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) {
if resp.ContentRange == nil {
// ContentRange is nil when the full file contents are provided and not
// chunked. Use ContentLength instead.
if resp.ContentLength > 0 {
d.totalBytes = resp.ContentLength
if aws.ToInt64(resp.ContentLength) > 0 {
d.totalBytes = aws.ToInt64(resp.ContentLength)
return
}
} else {

View file

@ -3,4 +3,4 @@
package manager
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.13.8"
const goModuleVersion = "1.15.7"

View file

@ -13,8 +13,11 @@ import (
"github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/internal/awsutil"
internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
"github.com/aws/aws-sdk-go-v2/service/s3"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
smithymiddleware "github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// MaxUploadParts is the maximum allowed number of parts in a multi-part upload
@ -308,6 +311,9 @@ func (u Uploader) Upload(ctx context.Context, input *s3.PutObjectInput, opts ...
clientOptions = append(clientOptions, func(o *s3.Options) {
o.APIOptions = append(o.APIOptions,
middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey),
func(s *smithymiddleware.Stack) error {
return s.Finalize.Insert(&setS3ExpressDefaultChecksum{}, "ResolveEndpointV2", smithymiddleware.After)
},
)
})
clientOptions = append(clientOptions, i.cfg.ClientOptions...)
@ -501,7 +507,7 @@ func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, e
return &UploadOutput{
Location: locationRecorder.location,
BucketKeyEnabled: out.BucketKeyEnabled,
BucketKeyEnabled: aws.ToBool(out.BucketKeyEnabled),
ChecksumCRC32: out.ChecksumCRC32,
ChecksumCRC32C: out.ChecksumCRC32C,
ChecksumSHA1: out.ChecksumSHA1,
@ -568,9 +574,11 @@ type chunk struct {
// since S3 required this list to be sent in sorted order.
type completedParts []types.CompletedPart
func (a completedParts) Len() int { return len(a) }
func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }
func (a completedParts) Len() int { return len(a) }
func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a completedParts) Less(i, j int) bool {
return aws.ToInt32(a[i].PartNumber) < aws.ToInt32(a[j].PartNumber)
}
// upload will perform a multipart upload using the firstBuf buffer containing
// the first chunk of data.
@ -639,7 +647,7 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadO
UploadID: u.uploadID,
CompletedParts: u.parts,
BucketKeyEnabled: completeOut.BucketKeyEnabled,
BucketKeyEnabled: aws.ToBool(completeOut.BucketKeyEnabled),
ChecksumCRC32: completeOut.ChecksumCRC32,
ChecksumCRC32C: completeOut.ChecksumCRC32C,
ChecksumSHA1: completeOut.ChecksumSHA1,
@ -722,7 +730,7 @@ func (u *multiuploader) send(c chunk) error {
// PutObject as they are never valid for individual parts of a
// multipart upload.
PartNumber: c.num,
PartNumber: aws.Int32(c.num),
UploadId: &u.uploadID,
}
// TODO should do copy then clear?
@ -734,7 +742,7 @@ func (u *multiuploader) send(c chunk) error {
var completed types.CompletedPart
awsutil.Copy(&completed, resp)
completed.PartNumber = c.num
completed.PartNumber = aws.Int32(c.num)
u.m.Lock()
u.parts = append(u.parts, completed)
@ -806,3 +814,42 @@ type readerAtSeeker interface {
io.ReaderAt
io.ReadSeeker
}
// setS3ExpressDefaultChecksum defaults to CRC32 for S3Express buckets,
// which is required when uploading to those through transfer manager.
type setS3ExpressDefaultChecksum struct{}
func (*setS3ExpressDefaultChecksum) ID() string {
return "setS3ExpressDefaultChecksum"
}
func (*setS3ExpressDefaultChecksum) HandleFinalize(
ctx context.Context, in smithymiddleware.FinalizeInput, next smithymiddleware.FinalizeHandler,
) (
out smithymiddleware.FinalizeOutput, metadata smithymiddleware.Metadata, err error,
) {
const checksumHeader = "x-amz-checksum-algorithm"
if internalcontext.GetS3Backend(ctx) != internalcontext.S3BackendS3Express {
return next.HandleFinalize(ctx, in)
}
// If this is CreateMultipartUpload we need to ensure the checksum
// algorithm header is present. Otherwise everything is driven off the
// context setting and we can let it flow from there.
if middleware.GetOperationName(ctx) == "CreateMultipartUpload" {
r, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
if internalcontext.GetChecksumInputAlgorithm(ctx) == "" {
r.Header.Set(checksumHeader, "CRC32")
}
return next.HandleFinalize(ctx, in)
} else if internalcontext.GetChecksumInputAlgorithm(ctx) == "" {
ctx = internalcontext.SetChecksumInputAlgorithm(ctx, string(types.ChecksumAlgorithmCrc32))
}
return next.HandleFinalize(ctx, in)
}
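
The aws.ToInt64, aws.ToBool, and aws.Int32 conversions sprinkled through this file follow from the nullability breaking change called out in the s3 and manager changelogs: numeric and boolean members are now pointers. A brief hedged sketch of the resulting read/write pattern (the ETag value is made up):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	// Writing: wrap scalars with the aws pointer helpers.
	part := types.CompletedPart{
		ETag:       aws.String(`"3858f62230ac3c915f300c664312c63f"`),
		PartNumber: aws.Int32(1),
	}

	// Reading: the To* helpers return the zero value for nil pointers,
	// so unset members no longer need explicit nil checks.
	fmt.Println(aws.ToInt32(part.PartNumber)) // 1
	fmt.Println(aws.ToString(part.ETag))      // "3858f62230ac3c915f300c664312c63f"
}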

View file

@ -16,6 +16,9 @@ const SigV4 = "sigv4"
// Authentication Scheme Signature Version 4A
const SigV4A = "sigv4a"
// SigV4S3Express identifies the S3 S3Express auth scheme.
const SigV4S3Express = "sigv4-s3express"
// None is a constant representing the
// None Authentication Scheme
const None = "none"
@ -24,9 +27,10 @@ const None = "none"
// that indicates the list of supported AWS
// authentication schemes
var SupportedSchemes = map[string]bool{
SigV4: true,
SigV4A: true,
None: true,
SigV4: true,
SigV4A: true,
SigV4S3Express: true,
None: true,
}
// AuthenticationScheme is a representation of
@ -93,10 +97,11 @@ func GetAuthenticationSchemes(p *smithy.Properties) ([]AuthenticationScheme, err
for _, scheme := range authSchemes {
authScheme, _ := scheme.(map[string]interface{})
switch authScheme["name"] {
case SigV4:
version := authScheme["name"].(string)
switch version {
case SigV4, SigV4S3Express:
v4Scheme := AuthenticationSchemeV4{
Name: SigV4,
Name: version,
SigningName: getSigningName(authScheme),
SigningRegion: getSigningRegion(authScheme),
DisableDoubleEncoding: getDisableDoubleEncoding(authScheme),

View file

@ -36,7 +36,7 @@ func (v *BearerTokenProviderAdapter) GetIdentity(ctx context.Context, _ smithy.P
) {
token, err := v.Provider.RetrieveBearerToken(ctx)
if err != nil {
return nil, fmt.Errorf("get token: %v", err)
return nil, fmt.Errorf("get token: %w", err)
}
return &BearerTokenAdapter{Token: token}, nil

View file

@ -27,7 +27,7 @@ func (v *BearerTokenSignerAdapter) SignRequest(ctx context.Context, r *smithyhtt
signed, err := v.Signer.SignWithBearerToken(ctx, ca.Token, r)
if err != nil {
return fmt.Errorf("sign request: %v", err)
return fmt.Errorf("sign request: %w", err)
}
*r = *signed.(*smithyhttp.Request)

View file

@ -39,7 +39,7 @@ func (v *CredentialsProviderAdapter) GetIdentity(ctx context.Context, _ smithy.P
creds, err := v.Provider.Retrieve(ctx)
if err != nil {
return nil, fmt.Errorf("get credentials: %v", err)
return nil, fmt.Errorf("get credentials: %w", err)
}
return &CredentialsAdapter{Credentials: creds}, nil

View file

@ -46,7 +46,7 @@ func (v *V4SignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request
o.LogSigning = v.LogSigning
})
if err != nil {
return fmt.Errorf("sign http: %v", err)
return fmt.Errorf("sign http: %w", err)
}
return nil
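
The recurring %v to %w changes in these adapters are not cosmetic: %w keeps the wrapped error in the chain so callers can still match it with errors.Is/errors.As, while %v flattens it to text. A self-contained sketch (tokenError is illustrative, not an SDK type):

package main

import (
	"errors"
	"fmt"
)

type tokenError struct{ msg string }

func (e *tokenError) Error() string { return e.msg }

func main() {
	base := &tokenError{msg: "token expired"}

	wrapped := fmt.Errorf("get token: %w", base)   // keeps base in the error chain
	flattened := fmt.Errorf("get token: %v", base) // keeps only its text

	var te *tokenError
	fmt.Println(errors.As(wrapped, &te))   // true
	fmt.Println(errors.As(flattened, &te)) // false
}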

View file

@ -1,3 +1,27 @@
# v1.2.9 (2023-12-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.2.8 (2023-12-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.2.7 (2023-11-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.2.6 (2023-11-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.2.5 (2023-11-28.2)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.2.4 (2023-11-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.2.3 (2023-11-15)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package configsources
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.2.3"
const goModuleVersion = "1.2.9"

View file

@ -0,0 +1,39 @@
package context
import (
"context"
"github.com/aws/smithy-go/middleware"
)
type s3BackendKey struct{}
type checksumInputAlgorithmKey struct{}
const (
// S3BackendS3Express identifies the S3Express backend
S3BackendS3Express = "S3Express"
)
// SetS3Backend stores the resolved endpoint backend within the request
// context, which is required for a variety of custom S3 behaviors.
func SetS3Backend(ctx context.Context, typ string) context.Context {
return middleware.WithStackValue(ctx, s3BackendKey{}, typ)
}
// GetS3Backend retrieves the stored endpoint backend within the context.
func GetS3Backend(ctx context.Context) string {
v, _ := middleware.GetStackValue(ctx, s3BackendKey{}).(string)
return v
}
// SetChecksumInputAlgorithm sets the request checksum algorithm on the
// context.
func SetChecksumInputAlgorithm(ctx context.Context, value string) context.Context {
return middleware.WithStackValue(ctx, checksumInputAlgorithmKey{}, value)
}
// GetChecksumInputAlgorithm returns the checksum algorithm from the context.
func GetChecksumInputAlgorithm(ctx context.Context) string {
v, _ := middleware.GetStackValue(ctx, checksumInputAlgorithmKey{}).(string)
return v
}

View file

@ -11,7 +11,7 @@ func GetPartition(region string) *PartitionConfig {
var partitions = []Partition{
{
ID: "aws",
RegionRegex: "^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$",
RegionRegex: "^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$",
DefaultConfig: PartitionConfig{
Name: "aws",
DnsSuffix: "amazonaws.com",
@ -90,6 +90,13 @@ var partitions = []Partition{
SupportsFIPS: nil,
SupportsDualStack: nil,
},
"ap-southeast-4": {
Name: nil,
DnsSuffix: nil,
DualStackDnsSuffix: nil,
SupportsFIPS: nil,
SupportsDualStack: nil,
},
"aws-global": {
Name: nil,
DnsSuffix: nil,
@ -160,6 +167,13 @@ var partitions = []Partition{
SupportsFIPS: nil,
SupportsDualStack: nil,
},
"il-central-1": {
Name: nil,
DnsSuffix: nil,
DualStackDnsSuffix: nil,
SupportsFIPS: nil,
SupportsDualStack: nil,
},
"me-central-1": {
Name: nil,
DnsSuffix: nil,
@ -340,4 +354,28 @@ var partitions = []Partition{
},
},
},
{
ID: "aws-iso-e",
RegionRegex: "^eu\\-isoe\\-\\w+\\-\\d+$",
DefaultConfig: PartitionConfig{
Name: "aws-iso-e",
DnsSuffix: "cloud.adc-e.uk",
DualStackDnsSuffix: "cloud.adc-e.uk",
SupportsFIPS: true,
SupportsDualStack: false,
},
Regions: map[string]RegionOverrides{},
},
{
ID: "aws-iso-f",
RegionRegex: "^us\\-isof\\-\\w+\\-\\d+$",
DefaultConfig: PartitionConfig{
Name: "aws-iso-f",
DnsSuffix: "csp.hci.ic.gov",
DualStackDnsSuffix: "csp.hci.ic.gov",
SupportsFIPS: true,
SupportsDualStack: false,
},
Regions: map[string]RegionOverrides{},
},
}

View file

@ -1,3 +1,27 @@
# v2.5.9 (2023-12-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v2.5.8 (2023-12-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v2.5.7 (2023-11-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v2.5.6 (2023-11-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v2.5.5 (2023-11-28.2)
* **Dependency Update**: Updated to the latest SDK module versions
# v2.5.4 (2023-11-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v2.5.3 (2023-11-15)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package endpoints
// goModuleVersion is the tagged release for this module
const goModuleVersion = "2.5.3"
const goModuleVersion = "2.5.9"

View file

@ -1,3 +1,11 @@
# v1.7.2 (2023-12-08)
* **Bug Fix**: Correct loading of [services *] sections into shared config.
# v1.7.1 (2023-11-16)
* **Bug Fix**: Fix recognition of trailing comments in shared config properties. # or ; separators that aren't preceded by whitespace at the end of a property value should be considered part of it.
# v1.7.0 (2023-11-13)
* **Feature**: Replace the legacy config parser with a modern, less-strict implementation. Parsing failures within a section will now simply ignore the invalid line rather than silently drop the entire section.

View file

@ -3,4 +3,4 @@
package ini
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.7.0"
const goModuleVersion = "1.7.2"

View file

@ -82,7 +82,7 @@ func (p *parser) handleSubProperty(tok *lineTokenSubProperty) {
// "promote" this to a normal property.
p.handleProperty(&lineTokenProperty{
Key: tok.Key,
Value: strings.TrimSpace(trimComment(tok.Value)),
Value: strings.TrimSpace(trimPropertyComment(tok.Value)),
})
return
}

View file

@ -1,11 +1,21 @@
package ini
import "strings"
import (
"strings"
)
func trimComment(v string) string {
rest, _, _ := strings.Cut(v, "#")
rest, _, _ = strings.Cut(rest, ";")
return rest
func trimProfileComment(s string) string {
r, _, _ := strings.Cut(s, "#")
r, _, _ = strings.Cut(r, ";")
return r
}
func trimPropertyComment(s string) string {
r, _, _ := strings.Cut(s, " #")
r, _, _ = strings.Cut(r, " ;")
r, _, _ = strings.Cut(r, "\t#")
r, _, _ = strings.Cut(r, "\t;")
return r
}
// assumes no surrounding comment

View file

@ -30,7 +30,7 @@ func isLineComment(line string) bool {
}
func asProfile(line string) *lineTokenProfile { // " [ type name ] ; comment"
trimmed := strings.TrimSpace(trimComment(line)) // "[ type name ]"
trimmed := strings.TrimSpace(trimProfileComment(line)) // "[ type name ]"
if !isBracketed(trimmed) {
return nil
}
@ -48,7 +48,8 @@ func asProperty(line string) *lineTokenProperty {
return nil
}
trimmed := strings.TrimRight(trimComment(line), " \t")
trimmed := trimPropertyComment(line)
trimmed = strings.TrimRight(trimmed, " \t")
k, v, ok := splitProperty(trimmed)
if !ok {
return nil
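
To see the behavior change described in the v1.7.1 note, here is a standalone sketch that copies the new helper (it is unexported in the ini package) and runs it on a few property values:

package main

import (
	"fmt"
	"strings"
)

// Copied from the vendored ini package: a trailing comment now starts only at
// a '#' or ';' that is preceded by a space or a tab.
func trimPropertyComment(s string) string {
	r, _, _ := strings.Cut(s, " #")
	r, _, _ = strings.Cut(r, " ;")
	r, _, _ = strings.Cut(r, "\t#")
	r, _, _ = strings.Cut(r, "\t;")
	return r
}

func main() {
	fmt.Printf("%q\n", trimPropertyComment("us-west-2 # primary region")) // "us-west-2"
	fmt.Printf("%q\n", trimPropertyComment("pass;word#1"))                // "pass;word#1" (kept intact)
	fmt.Printf("%q\n", trimPropertyComment("value\t; trailing note"))     // "value"
}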

View file

@ -54,18 +54,7 @@ func (v Value) String() string {
// MapValue returns a map value for sub properties
func (v Value) MapValue() map[string]string {
newlineParts := strings.Split(string(v.str), "\n")
mp := make(map[string]string)
for _, part := range newlineParts {
operandParts := strings.Split(part, "=")
if len(operandParts) < 2 {
continue
}
key := strings.TrimSpace(operandParts[0])
val := strings.TrimSpace(operandParts[1])
mp[key] = val
}
return mp
return v.mp
}
// IntValue returns an integer value

View file

@ -1,3 +1,27 @@
# v1.2.9 (2023-12-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.2.8 (2023-12-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.2.7 (2023-11-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.2.6 (2023-11-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.2.5 (2023-11-28.2)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.2.4 (2023-11-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.2.3 (2023-11-15)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package v4a
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.2.3"
const goModuleVersion = "1.2.9"

View file

@ -39,7 +39,7 @@ func (v *CredentialsProviderAdapter) GetIdentity(ctx context.Context, _ smithy.P
) {
creds, err := v.Provider.RetrievePrivateKey(ctx)
if err != nil {
return nil, fmt.Errorf("get credentials: %v", err)
return nil, fmt.Errorf("get credentials: %w", err)
}
return &CredentialsAdapter{Credentials: creds}, nil
@ -79,7 +79,7 @@ func (v *SignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request,
o.LogSigning = v.LogSigning
})
if err != nil {
return fmt.Errorf("sign http: %v", err)
return fmt.Errorf("sign http: %w", err)
}
return nil

View file

@ -1,3 +1,15 @@
# v1.10.4 (2023-12-07)
* No change notes available for this release.
# v1.10.3 (2023-11-30)
* No change notes available for this release.
# v1.10.2 (2023-11-29)
* No change notes available for this release.
# v1.10.1 (2023-11-15)
* No change notes available for this release.

View file

@ -3,4 +3,4 @@
package acceptencoding
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.10.1"
const goModuleVersion = "1.10.4"

View file

@ -1,3 +1,27 @@
# v1.2.9 (2023-12-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.2.8 (2023-12-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.2.7 (2023-11-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.2.6 (2023-11-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.2.5 (2023-11-28.2)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.2.4 (2023-11-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.2.3 (2023-11-15)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package checksum
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.2.3"
const goModuleVersion = "1.2.9"

View file

@ -9,6 +9,7 @@ import (
"strconv"
v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@ -377,7 +378,7 @@ func (m *addInputChecksumTrailer) HandleFinalize(
}
func getInputAlgorithm(ctx context.Context) (Algorithm, bool, error) {
ctxAlgorithm := getContextInputAlgorithm(ctx)
ctxAlgorithm := internalcontext.GetChecksumInputAlgorithm(ctx)
if ctxAlgorithm == "" {
return "", false, nil
}

View file

@ -3,6 +3,7 @@ package checksum
import (
"context"
internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
"github.com/aws/smithy-go/middleware"
)
@ -35,33 +36,13 @@ func (m *setupInputContext) HandleInitialize(
// check is input resource has a checksum algorithm
algorithm, ok := m.GetAlgorithm(in.Parameters)
if ok && len(algorithm) != 0 {
ctx = setContextInputAlgorithm(ctx, algorithm)
ctx = internalcontext.SetChecksumInputAlgorithm(ctx, algorithm)
}
}
return next.HandleInitialize(ctx, in)
}
// inputAlgorithmKey is the key set on context used to identify, retrieves the
// request checksum algorithm if present on the context.
type inputAlgorithmKey struct{}
// setContextInputAlgorithm sets the request checksum algorithm on the context.
//
// Scoped to stack values.
func setContextInputAlgorithm(ctx context.Context, value string) context.Context {
return middleware.WithStackValue(ctx, inputAlgorithmKey{}, value)
}
// getContextInputAlgorithm returns the checksum algorithm from the context if
// one was specified. Empty string is returned if one is not specified.
//
// Scoped to stack values.
func getContextInputAlgorithm(ctx context.Context) (v string) {
v, _ = middleware.GetStackValue(ctx, inputAlgorithmKey{}).(string)
return v
}
type setupOutputContext struct {
// GetValidationMode is a function to get the checksum validation
// mode of the output payload from the input parameters.

View file

@ -1,3 +1,27 @@
# v1.10.9 (2023-12-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.10.8 (2023-12-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.10.7 (2023-11-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.10.6 (2023-11-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.10.5 (2023-11-28.2)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.10.4 (2023-11-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.10.3 (2023-11-15)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package presignedurl
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.10.3"
const goModuleVersion = "1.10.9"

View file

@ -1,3 +1,27 @@
# v1.16.9 (2023-12-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.8 (2023-12-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.7 (2023-11-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.6 (2023-11-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.5 (2023-11-28.2)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.4 (2023-11-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.16.3 (2023-11-15)
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -3,4 +3,4 @@
package s3shared
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.16.3"
const goModuleVersion = "1.16.9"

View file

@ -1,3 +1,60 @@
# v1.47.5 (2023-12-08)
* **Bug Fix**: Add non-vhostable buckets to request path when using legacy V1 endpoint resolver.
* **Bug Fix**: Improve uniqueness of default S3Express session credentials cache keying to prevent collision in multi-credential scenarios.
* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein.
# v1.47.4 (2023-12-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.47.3 (2023-12-06)
* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously.
# v1.47.2 (2023-12-01)
* **Bug Fix**: Correct wrapping of errors in authentication workflow.
* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction.
* **Dependency Update**: Updated to the latest SDK module versions
# v1.47.1 (2023-11-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.47.0 (2023-11-29)
* **Feature**: Expose Options() accessor on service clients.
* **Dependency Update**: Updated to the latest SDK module versions
# v1.46.0 (2023-11-28.2)
* **Feature**: Add S3Express support.
* **Feature**: Adds support for S3 Express One Zone.
* **Dependency Update**: Updated to the latest SDK module versions
# v1.45.1 (2023-11-28)
* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction.
# v1.45.0 (2023-11-27)
* **Feature**: Adding new params - Key and Prefix, to S3 API operations for supporting S3 Access Grants. Note - These updates will not change any of the existing S3 API functionality.
# v1.44.0 (2023-11-21)
* **Feature**: Add support for automatic date based partitioning in S3 Server Access Logs.
* **Bug Fix**: Don't send MaxKeys/MaxUploads=0 when unspecified in ListObjectVersions and ListMultipartUploads paginators.
# v1.43.1 (2023-11-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.43.0 (2023-11-17)
* **Feature**: **BREAKING CHANGE** Correct nullability of a large number of S3 structure fields. See https://github.com/aws/aws-sdk-go-v2/issues/2162.
* **Feature**: Removes all default 0 values for numbers and false values for booleans
# v1.42.2 (2023-11-15)
* **Dependency Update**: Updated to the latest SDK module versions
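
The v1.47.2 and v1.47.3 entries above restore anonymous access and the recognition of AnonymousCredentials at client construction. A minimal illustrative sketch (not part of this diff) of how a caller relies on that behaviour; the bucket and key names are placeholders:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// Per v1.47.2/v1.47.3, an AnonymousCredentials provider set at construction
	// is recognized again, so requests are sent unsigned.
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.Credentials = aws.AnonymousCredentials{}
	})

	// "example-public-bucket" and "example-key" are placeholders.
	_, err = client.GetObject(context.Background(), &s3.GetObjectInput{
		Bucket: aws.String("example-public-bucket"),
		Key:    aws.String("example-key"),
	})
	if err != nil {
		log.Println(err)
	}
}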

View file

@ -66,10 +66,14 @@ func New(options Options, optFns ...func(*Options)) *Client {
fn(&options)
}
finalizeRetryMaxAttempts(&options)
resolveCredentialProvider(&options)
ignoreAnonymousAuth(&options)
resolveExpressCredentials(&options)
finalizeServiceEndpointAuthResolver(&options)
resolveAuthSchemes(&options)
@ -78,9 +82,20 @@ func New(options Options, optFns ...func(*Options)) *Client {
options: options,
}
finalizeExpressCredentials(&options, client)
return client
}
// Options returns a copy of the client configuration.
//
// Callers SHOULD NOT perform mutations on any inner structures within client
// config. Config overrides should instead be made on a per-operation basis through
// functional options.
func (c *Client) Options() Options {
return c.options.Copy()
}
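
As a brief aside (not part of the vendored code), the comment above recommends reading configuration through Options() and making overrides per operation through functional options. A minimal sketch under that assumption; the function name and the region override are illustrative only:

package s3usage

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// listWithRegionOverride inspects the client config through Options(), which
// returns a copy and is therefore safe to read, and applies a per-operation
// override via a functional option instead of mutating the copy.
func listWithRegionOverride(ctx context.Context, client *s3.Client) error {
	base := client.Options()
	log.Printf("client region: %s", base.Region)

	// Illustrative per-call override: only this request targets us-west-2.
	_, err := client.ListBuckets(ctx, &s3.ListBucketsInput{}, func(o *s3.Options) {
		o.Region = "us-west-2"
	})
	return err
}
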
func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) {
ctx = middleware.ClearStackValues(ctx)
stack := middleware.NewStack(opID, smithyhttp.NewStackRequest)
@ -92,12 +107,14 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf
setSafeEventStreamClientLogMode(&options, opID)
finalizeRetryMaxAttemptOptions(&options, *c)
finalizeOperationRetryMaxAttempts(&options, *c)
finalizeClientEndpointResolverOptions(&options)
resolveCredentialProvider(&options)
finalizeOperationExpressCredentials(&options, *c)
finalizeOperationEndpointAuthResolver(&options)
for _, fn := range stackFns {
@ -150,7 +167,7 @@ func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in mi
func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error {
if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil {
return fmt.Errorf("add ResolveAuthScheme: %v", err)
return fmt.Errorf("add ResolveAuthScheme: %w", err)
}
if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil {
return fmt.Errorf("add GetIdentity: %v", err)
@ -159,7 +176,7 @@ func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, o
return fmt.Errorf("add ResolveEndpointV2: %v", err)
}
if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil {
return fmt.Errorf("add Signing: %v", err)
return fmt.Errorf("add Signing: %w", err)
}
return nil
}
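
The hunks above switch fmt.Errorf from %v to %w, so middleware-registration failures now wrap the underlying error instead of only embedding its text. A small standalone sketch, using a hypothetical sentinel error, of what that wrapping buys callers:

package main

import (
	"errors"
	"fmt"
)

// errAddMiddleware is a hypothetical sentinel error for illustration.
var errAddMiddleware = errors.New("middleware already registered")

func register() error {
	// With %w the returned error wraps errAddMiddleware; with %v it would only
	// include its message, and errors.Is/errors.As could no longer match it.
	return fmt.Errorf("add ResolveAuthScheme: %w", errAddMiddleware)
}

func main() {
	err := register()
	fmt.Println(errors.Is(err, errAddMiddleware)) // true because of %w
}
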
@ -177,6 +194,11 @@ func resolveAuthSchemes(options *Options) {
Logger: options.Logger,
LogSigning: options.ClientLogMode.IsSigning(),
}),
internalauth.NewHTTPAuthScheme("com.amazonaws.s3#sigv4express", &s3cust.ExpressSigner{
Signer: options.HTTPSignerV4,
Logger: options.Logger,
LogSigning: options.ClientLogMode.IsSigning(),
}),
internalauth.NewHTTPAuthScheme("aws.auth#sigv4a", &v4a.SignerAdapter{
Signer: options.httpSignerV4a,
Logger: options.Logger,
@ -257,6 +279,7 @@ func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
resolveAWSEndpointResolver(cfg, &opts)
resolveUseARNRegion(cfg, &opts)
resolveDisableMultiRegionAccessPoints(cfg, &opts)
resolveDisableExpressAuth(cfg, &opts)
resolveUseDualStackEndpoint(cfg, &opts)
resolveUseFIPSEndpoint(cfg, &opts)
resolveBaseEndpoint(cfg, &opts)
@ -351,7 +374,15 @@ func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) {
o.RetryMaxAttempts = cfg.RetryMaxAttempts
}
func finalizeRetryMaxAttemptOptions(o *Options, client Client) {
func finalizeRetryMaxAttempts(o *Options) {
if o.RetryMaxAttempts == 0 {
return
}
o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
}
func finalizeOperationRetryMaxAttempts(o *Options, client Client) {
if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts {
return
}
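
The refactor above splits retry finalization: finalizeRetryMaxAttempts applies the client-level RetryMaxAttempts at construction, while finalizeOperationRetryMaxAttempts re-wraps the Retryer only when a per-call value differs from the client's. A hedged sketch of the caller-visible effect; the bucket name and attempt counts are placeholders:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// Client-level value, applied once by finalizeRetryMaxAttempts at construction.
	client := s3.NewFromConfig(cfg, func(o *s3.Options) { o.RetryMaxAttempts = 3 })

	// Per-operation value: finalizeOperationRetryMaxAttempts re-wraps the Retryer
	// only because 10 differs from the client's 3 (0 would keep the client default).
	_, err = client.ListObjectsV2(context.Background(),
		&s3.ListObjectsV2Input{Bucket: aws.String("example-bucket")},
		func(o *s3.Options) { o.RetryMaxAttempts = 10 },
	)
	if err != nil {
		log.Println(err)
	}
}
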
@ -721,7 +752,7 @@ func (m *presignContextPolyfillMiddleware) HandleFinalize(ctx context.Context, i
schemeID := rscheme.Scheme.SchemeID()
ctx = s3cust.SetSignerVersion(ctx, schemeID)
if schemeID == "aws.auth#sigv4" {
if schemeID == "aws.auth#sigv4" || schemeID == "com.amazonaws.s3#sigv4express" {
if sn, ok := smithyhttp.GetSigV4SigningName(&rscheme.SignerProperties); ok {
ctx = awsmiddleware.SetSigningName(ctx, sn)
}
@ -765,9 +796,10 @@ func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, op
return err
}
// add multi-region access point presigner
// extended s3 presigning
signermv := s3cust.NewPresignHTTPRequestMiddleware(s3cust.PresignHTTPRequestMiddlewareOptions{
CredentialsProvider: options.Credentials,
ExpressCredentials: options.ExpressCredentials,
V4Presigner: c.Presigner,
V4aPresigner: c.presignerV4a,
LogSigning: options.ClientLogMode.IsSigning(),
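
The extended presigning above now carries ExpressCredentials alongside the SigV4/SigV4a presigners; from the caller's side, presigning is unchanged. An illustrative sketch using the existing presign client; bucket and key names are placeholders:

package main

import (
	"context"
	"log"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// The presign client drives the extended presigning middleware shown above;
	// for directory buckets it would select the S3Express signer automatically.
	presigner := s3.NewPresignClient(client, func(o *s3.PresignOptions) {
		o.Expires = 15 * time.Minute
	})
	req, err := presigner.PresignGetObject(context.Background(), &s3.GetObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(req.URL)
}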

View file

@ -13,18 +13,40 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// This action aborts a multipart upload. After a multipart upload is aborted, no
// additional parts can be uploaded using that upload ID. The storage consumed by
// any previously uploaded parts will be freed. However, if any part uploads are
// This operation aborts a multipart upload. After a multipart upload is aborted,
// no additional parts can be uploaded using that upload ID. The storage consumed
// by any previously uploaded parts will be freed. However, if any part uploads are
// currently in progress, those part uploads might or might not succeed. As a
// result, it might be necessary to abort a given multipart upload multiple times
// in order to completely free all storage consumed by all parts. To verify that
// all parts have been removed, so you don't get charged for the part storage, you
// should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
// action and ensure that the parts list is empty. For information about
// permissions required to use the multipart upload, see Multipart Upload and
// Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
// . The following operations are related to AbortMultipartUpload :
// all parts have been removed and prevent getting charged for the part storage,
// you should call the ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
// API operation and ensure that the parts list is empty. Directory buckets - For
// directory buckets, you must make requests for this API operation to the Zonal
// endpoint. These endpoints support virtual-hosted-style requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style
// requests are not supported. For more information, see Regional and Zonal
// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
// in the Amazon S3 User Guide. Permissions
// - General purpose bucket permissions - For information about permissions
// required to use the multipart upload, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
// in the Amazon S3 User Guide.
// - Directory bucket permissions - To grant access to this API operation on a
// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
// API operation for session-based authorization. Specifically, you grant the
// s3express:CreateSession permission to the directory bucket in a bucket policy
// or an IAM identity-based policy. Then, you make the CreateSession API call on
// the bucket to obtain a session token. With the session token in your request
// header, you can make API requests to this operation. After the session token
// expires, you make another CreateSession API call to generate a new session
// token for use. The Amazon Web Services CLI or SDKs create a session and refresh the
// session token automatically to avoid service interruptions when a session
// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
// .
//
// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are
// related to AbortMultipartUpload :
// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
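
To make the abort-then-verify flow described above concrete, here is a minimal sketch that is not part of the vendored code; the bucket, key, and upload ID are placeholders:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)
	ctx := context.Background()

	bucket, key, uploadID := "example-bucket", "example-key", "example-upload-id"

	// Abort the upload, then list its parts as the documentation suggests; a
	// NoSuchUpload error (or an empty parts list) means the storage is freed.
	_, err = client.AbortMultipartUpload(ctx, &s3.AbortMultipartUploadInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: aws.String(uploadID),
	})
	if err != nil {
		log.Fatal(err)
	}
	parts, err := client.ListParts(ctx, &s3.ListPartsInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: aws.String(uploadID),
	})
	if err != nil {
		log.Printf("ListParts after abort: %v", err) // often NoSuchUpload
		return
	}
	log.Printf("remaining parts: %d", len(parts.Parts))
}
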
@ -47,16 +69,26 @@ func (c *Client) AbortMultipartUpload(ctx context.Context, params *AbortMultipar
type AbortMultipartUploadInput struct {
// The bucket name to which the upload was taking place. When using this action
// with an access point, you must direct requests to the access point hostname. The
// access point hostname takes the form
// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
// action with an access point through the Amazon Web Services SDKs, you provide
// the access point ARN in place of the bucket name. For more information about
// access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide. When you use this action with Amazon S3 on
// Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on
// Outposts hostname takes the form
// The bucket name to which the upload was taking place. Directory buckets - When
// you use this operation with a directory bucket, you must use
// virtual-hosted-style requests in the format
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket
// naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide. Access points - When you use this action with an
// access point, you must provide the alias of the access point in place of the
// bucket name or specify the access point ARN. When using the access point ARN,
// you must direct requests to the access point hostname. The access point hostname
// takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide. Access points and Object Lambda access points are
// not supported by directory buckets. S3 on Outposts - When you use this action
// with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts
// hostname. The S3 on Outposts hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
@ -76,18 +108,19 @@ type AbortMultipartUploadInput struct {
// This member is required.
UploadId *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
// in the Amazon S3 User Guide. This functionality is not supported for directory
// buckets.
RequestPayer types.RequestPayer
noSmithyDocumentSerde
@ -95,13 +128,14 @@ type AbortMultipartUploadInput struct {
func (in *AbortMultipartUploadInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.Key = in.Key
}
type AbortMultipartUploadOutput struct {
// If present, indicates that the requester was successfully charged for the
// request.
// request. This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// Metadata pertaining to the operation's result.
@ -165,6 +199,9 @@ func (c *Client) addOperationAbortMultipartUploadMiddlewares(stack *middleware.S
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpAbortMultipartUploadValidationMiddleware(stack); err != nil {
return err
}

View file

@ -15,54 +15,81 @@ import (
// Completes a multipart upload by assembling previously uploaded parts. You first
// initiate the multipart upload and then upload all parts using the UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
// operation or the UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
// operation. After successfully uploading all relevant parts of an upload, you
// call this action to complete the upload. Upon receiving this request, Amazon S3
// concatenates all the parts in ascending order by part number to create a new
// object. In the Complete Multipart Upload request, you must provide the parts
// list. You must ensure that the parts list is complete. This action concatenates
// the parts that you provide in the list. For each part in the list, you must
// provide the part number and the ETag value, returned after that part was
// uploaded. Processing of a Complete Multipart Upload request could take several
// minutes to complete. After Amazon S3 begins processing the request, it sends an
// HTTP response header that specifies a 200 OK response. While processing is in
// progress, Amazon S3 periodically sends white space characters to keep the
// connection from timing out. A request could fail after the initial 200 OK
// response has been sent. This means that a 200 OK response can contain either a
// success or an error. If you call the S3 API directly, make sure to design your
// application to parse the contents of the response and handle it appropriately.
// If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect
// the embedded error and apply error handling per your configuration settings
// (including automatically retrying the request as appropriate). If the condition
// persists, the SDKs throws an exception (or, for the SDKs that don't use
// exceptions, they return the error). Note that if CompleteMultipartUpload fails,
// applications should be prepared to retry the failed requests. For more
// information, see Amazon S3 Error Best Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html)
// . You cannot use Content-Type: application/x-www-form-urlencoded with Complete
// Multipart Upload requests. Also, if you do not provide a Content-Type header,
// CompleteMultipartUpload returns a 200 OK response. For more information about
// multipart uploads, see Uploading Objects Using Multipart Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html)
// . For information about permissions required to use the multipart upload API,
// see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
// . CompleteMultipartUpload has the following special errors:
// - Error code: EntityTooSmall
// call this CompleteMultipartUpload operation to complete the upload. Upon
// receiving this request, Amazon S3 concatenates all the parts in ascending order
// by part number to create a new object. In the CompleteMultipartUpload request,
// you must provide the parts list and ensure that the parts list is complete. The
// CompleteMultipartUpload API operation concatenates the parts that you provide in
// the list. For each part in the list, you must provide the PartNumber value and
// the ETag value that are returned after that part was uploaded. The processing
// of a CompleteMultipartUpload request could take several minutes to finalize.
// After Amazon S3 begins processing the request, it sends an HTTP response header
// that specifies a 200 OK response. While processing is in progress, Amazon S3
// periodically sends white space characters to keep the connection from timing
// out. A request could fail after the initial 200 OK response has been sent. This
// means that a 200 OK response can contain either a success or an error. The
// error response might be embedded in the 200 OK response. If you call this API
// operation directly, make sure to design your application to parse the contents
// of the response and handle it appropriately. If you use Amazon Web Services
// SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply
// error handling per your configuration settings (including automatically retrying
// the request as appropriate). If the condition persists, the SDKs throw an
// exception (or, for the SDKs that don't use exceptions, they return an error).
// Note that if CompleteMultipartUpload fails, applications should be prepared to
// retry the failed requests. For more information, see Amazon S3 Error Best
// Practices (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html)
// . You can't use Content-Type: application/x-www-form-urlencoded for the
// CompleteMultipartUpload requests. Also, if you don't provide a Content-Type
// header, CompleteMultipartUpload can still return a 200 OK response. For more
// information about multipart uploads, see Uploading Objects Using Multipart
// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html)
// in the Amazon S3 User Guide. Directory buckets - For directory buckets, you must
// make requests for this API operation to the Zonal endpoint. These endpoints
// support virtual-hosted-style requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style
// requests are not supported. For more information, see Regional and Zonal
// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
// in the Amazon S3 User Guide. Permissions
// - General purpose bucket permissions - For information about permissions
// required to use the multipart upload API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
// in the Amazon S3 User Guide.
// - Directory bucket permissions - To grant access to this API operation on a
// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
// API operation for session-based authorization. Specifically, you grant the
// s3express:CreateSession permission to the directory bucket in a bucket policy
// or an IAM identity-based policy. Then, you make the CreateSession API call on
// the bucket to obtain a session token. With the session token in your request
// header, you can make API requests to this operation. After the session token
// expires, you make another CreateSession API call to generate a new session
// token for use. The Amazon Web Services CLI or SDKs create a session and refresh the
// session token automatically to avoid service interruptions when a session
// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
// .
//
// Special errors
// - Error Code: EntityTooSmall
// - Description: Your proposed upload is smaller than the minimum allowed
// object size. Each part must be at least 5 MB in size, except the last part.
// - 400 Bad Request
// - Error code: InvalidPart
// - HTTP Status Code: 400 Bad Request
// - Error Code: InvalidPart
// - Description: One or more of the specified parts could not be found. The
// part might not have been uploaded, or the specified entity tag might not have
// matched the part's entity tag.
// - 400 Bad Request
// - Error code: InvalidPartOrder
// part might not have been uploaded, or the specified ETag might not have matched
// the uploaded part's ETag.
// - HTTP Status Code: 400 Bad Request
// - Error Code: InvalidPartOrder
// - Description: The list of parts was not in ascending order. The parts list
// must be specified in order by part number.
// - 400 Bad Request
// - Error code: NoSuchUpload
// - HTTP Status Code: 400 Bad Request
// - Error Code: NoSuchUpload
// - Description: The specified multipart upload does not exist. The upload ID
// might be invalid, or the multipart upload might have been aborted or completed.
// - 404 Not Found
// - HTTP Status Code: 404 Not Found
//
// The following operations are related to CompleteMultipartUpload :
// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are
// related to CompleteMultipartUpload :
// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
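
A minimal sketch of the flow described above, assembling the parts list from the PartNumber/ETag pairs returned by earlier UploadPart calls and completing the upload; it is illustrative only, and the identifiers and ETags are placeholders:

package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	bucket, key, uploadID := "example-bucket", "example-key", "example-upload-id"
	etags := []string{"\"etag-part-1\"", "\"etag-part-2\""}

	// Build the parts list in ascending PartNumber order, as required.
	// PartNumber is *int32 after the v1.43.0 nullability change noted above.
	var parts []types.CompletedPart
	for i, etag := range etags {
		parts = append(parts, types.CompletedPart{
			ETag:       aws.String(etag),
			PartNumber: aws.Int32(int32(i + 1)),
		})
	}

	out, err := client.CompleteMultipartUpload(context.Background(), &s3.CompleteMultipartUploadInput{
		Bucket:          aws.String(bucket),
		Key:             aws.String(key),
		UploadId:        aws.String(uploadID),
		MultipartUpload: &types.CompletedMultipartUpload{Parts: parts},
	})
	if err != nil {
		log.Fatal(err) // a failed complete should be retried, per the guidance above
	}
	log.Printf("completed: %s", aws.ToString(out.Location))
}
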
@ -85,16 +112,26 @@ func (c *Client) CompleteMultipartUpload(ctx context.Context, params *CompleteMu
type CompleteMultipartUploadInput struct {
// Name of the bucket to which the multipart upload was initiated. When using this
// action with an access point, you must direct requests to the access point
// hostname. The access point hostname takes the form
// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
// action with an access point through the Amazon Web Services SDKs, you provide
// the access point ARN in place of the bucket name. For more information about
// access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide. When you use this action with Amazon S3 on
// Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on
// Outposts hostname takes the form
// Name of the bucket to which the multipart upload was initiated. Directory
// buckets - When you use this operation with a directory bucket, you must use
// virtual-hosted-style requests in the format
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket
// naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide. Access points - When you use this action with an
// access point, you must provide the alias of the access point in place of the
// bucket name or specify the access point ARN. When using the access point ARN,
// you must direct requests to the access point hostname. The access point hostname
// takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide. Access points and Object Lambda access points are
// not supported by directory buckets. S3 on Outposts - When you use this action
// with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts
// hostname. The S3 on Outposts hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
@ -142,9 +179,9 @@ type CompleteMultipartUploadInput struct {
// in the Amazon S3 User Guide.
ChecksumSHA256 *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
// The container for the multipart upload request information.
@ -152,29 +189,34 @@ type CompleteMultipartUploadInput struct {
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
// in the Amazon S3 User Guide. This functionality is not supported for directory
// buckets.
RequestPayer types.RequestPayer
// The server-side encryption (SSE) algorithm used to encrypt the object. This
// parameter is needed only when the object was created using a checksum algorithm.
// For more information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
// in the Amazon S3 User Guide.
// parameter is required only when the object was created using a checksum
// algorithm or if your bucket policy requires the use of SSE-C. For more
// information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html#ssec-require-condition-key)
// in the Amazon S3 User Guide. This functionality is not supported for directory
// buckets.
SSECustomerAlgorithm *string
// The server-side encryption (SSE) customer managed key. This parameter is needed
// only when the object was created using a checksum algorithm. For more
// information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
// in the Amazon S3 User Guide.
// in the Amazon S3 User Guide. This functionality is not supported for directory
// buckets.
SSECustomerKey *string
// The MD5 server-side encryption (SSE) customer managed key. This parameter is
// needed only when the object was created using a checksum algorithm. For more
// information, see Protecting data using SSE-C keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
// in the Amazon S3 User Guide.
// in the Amazon S3 User Guide. This functionality is not supported for directory
// buckets.
SSECustomerKeyMD5 *string
noSmithyDocumentSerde
@ -182,58 +224,55 @@ type CompleteMultipartUploadInput struct {
func (in *CompleteMultipartUploadInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.Key = in.Key
}
type CompleteMultipartUploadOutput struct {
// The name of the bucket that contains the newly created object. Does not return
// the access point ARN or access point alias if used. When using this action with
// an access point, you must direct requests to the access point hostname. The
// access point hostname takes the form
// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
// action with an access point through the Amazon Web Services SDKs, you provide
// the access point ARN in place of the bucket name. For more information about
// access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide. When you use this action with Amazon S3 on
// Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on
// Outposts hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
// information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
// the access point ARN or access point alias if used. Access points are not
// supported by directory buckets.
Bucket *string
// Indicates whether the multipart upload uses an S3 Bucket Key for server-side
// encryption with Key Management Service (KMS) keys (SSE-KMS).
BucketKeyEnabled bool
// encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality
// is not supported for directory buckets.
BucketKeyEnabled *bool
// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
// present if it was uploaded with the object. With multipart uploads, this may not
// be a checksum value of the object. For more information about how checksums are
// calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// present if it was uploaded with the object. When you use an API operation on an
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
// checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32 *string
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
// present if it was uploaded with the object. With multipart uploads, this may not
// be a checksum value of the object. For more information about how checksums are
// calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// present if it was uploaded with the object. When you use an API operation on an
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
// checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32C *string
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
// present if it was uploaded with the object. With multipart uploads, this may not
// be a checksum value of the object. For more information about how checksums are
// calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// present if it was uploaded with the object. When you use the API operation on an
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
// checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA1 *string
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
// present if it was uploaded with the object. With multipart uploads, this may not
// be a checksum value of the object. For more information about how checksums are
// calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// present if it was uploaded with the object. When you use an API operation on an
// object that was uploaded using multipart uploads, this value may not be a direct
// checksum value of the full object. Instead, it's a calculation based on the
// checksum values of each individual part. For more information about how
// checksums are calculated with multipart uploads, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA256 *string
@ -249,6 +288,7 @@ type CompleteMultipartUploadOutput struct {
// If the object expiration is configured, this will contain the expiration date (
// expiry-date ) and rule ID ( rule-id ). The value of rule-id is URL-encoded.
// This functionality is not supported for directory buckets.
Expiration *string
// The object key of the newly created object.
@ -258,19 +298,21 @@ type CompleteMultipartUploadOutput struct {
Location *string
// If present, indicates that the requester was successfully charged for the
// request.
// request. This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// If present, specifies the ID of the Key Management Service (KMS) symmetric
// encryption customer managed key that was used for the object.
// If present, indicates the ID of the Key Management Service (KMS) symmetric
// encryption customer managed key that was used for the object. This functionality
// is not supported for directory buckets.
SSEKMSKeyId *string
// The server-side encryption algorithm used when storing this object in Amazon S3
// (for example, AES256 , aws:kms ).
// (for example, AES256 , aws:kms ). For directory buckets, only server-side
// encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported.
ServerSideEncryption types.ServerSideEncryption
// Version ID of the newly created object, in case the bucket has versioning
// turned on.
// turned on. This functionality is not supported for directory buckets.
VersionId *string
// Metadata pertaining to the operation's result.
@ -334,6 +376,9 @@ func (c *Client) addOperationCompleteMultipartUploadMiddlewares(stack *middlewar
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpCompleteMultipartUploadValidationMiddleware(stack); err != nil {
return err
}

View file

@ -10,6 +10,7 @@ import (
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
"time"
)
@ -20,135 +21,89 @@ import (
// object greater than 5 GB, you must use the multipart upload Upload Part - Copy
// (UploadPartCopy) API. For more information, see Copy Object Using the REST
// Multipart Upload API (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html)
// . All copy requests must be authenticated. Additionally, you must have read
// access to the source object and write access to the destination bucket. For more
// information, see REST Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html)
// . Both the Region that you want to copy the object from and the Region that you
// want to copy the object to must be enabled for your account. A copy request
// might return an error when Amazon S3 receives the copy request or while Amazon
// S3 is copying the files. If the error occurs before the copy action starts, you
// receive a standard Amazon S3 error. If the error occurs during the copy
// operation, the error response is embedded in the 200 OK response. This means
// that a 200 OK response can contain either a success or an error. If you call
// the S3 API directly, make sure to design your application to parse the contents
// of the response and handle it appropriately. If you use Amazon Web Services
// SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply
// error handling per your configuration settings (including automatically retrying
// the request as appropriate). If the condition persists, the SDKs throws an
// exception (or, for the SDKs that don't use exceptions, they return the error).
// If the copy is successful, you receive a response with information about the
// copied object. If the request is an HTTP 1.1 request, the response is chunk
// encoded. If it were not, it would not contain the content-length, and you would
// need to read the entire body. The copy request charge is based on the storage
// class and Region that you specify for the destination object. The request can
// also result in a data retrieval charge for the source if the source storage
// class bills for data retrieval. For pricing information, see Amazon S3 pricing (http://aws.amazon.com/s3/pricing/)
// . Amazon S3 transfer acceleration does not support cross-Region copies. If you
// request a cross-Region copy using a transfer acceleration endpoint, you get a
// 400 Bad Request error. For more information, see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html)
// . Metadata When copying an object, you can preserve all metadata (the default)
// or specify new metadata. However, the access control list (ACL) is not preserved
// and is set to private for the user making the request. To override the default
// ACL setting, specify a new ACL when generating a copy request. For more
// information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html)
// . To specify whether you want the object metadata copied from the source object
// or replaced with metadata provided in the request, you can optionally add the
// x-amz-metadata-directive header. When you grant permissions, you can use the
// s3:x-amz-metadata-directive condition key to enforce certain metadata behavior
// when objects are uploaded. For more information, see Specifying Conditions in a
// Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html)
// in the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition
// keys, see Actions, Resources, and Condition Keys for Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html)
// . x-amz-website-redirect-location is unique to each object and must be
// specified in the request headers to copy the value. x-amz-copy-source-if Headers
// To only copy an object under certain conditions, such as whether the Etag
// matches or whether the object was modified before or after a specified date, use
// the following request parameters:
// - x-amz-copy-source-if-match
// - x-amz-copy-source-if-none-match
// - x-amz-copy-source-if-unmodified-since
// - x-amz-copy-source-if-modified-since
// . You can copy individual objects between general purpose buckets, between
// directory buckets, and between general purpose buckets and directory buckets.
// Directory buckets - For directory buckets, you must make requests for this API
// operation to the Zonal endpoint. These endpoints support virtual-hosted-style
// requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style
// requests are not supported. For more information, see Regional and Zonal
// endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
// in the Amazon S3 User Guide. Both the Region that you want to copy the object
// from and the Region that you want to copy the object to must be enabled for your
// account. Amazon S3 transfer acceleration does not support cross-Region copies.
// If you request a cross-Region copy using a transfer acceleration endpoint, you
// get a 400 Bad Request error. For more information, see Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html)
// . Authentication and authorization All CopyObject requests must be
// authenticated and signed by using IAM credentials (access key ID and secret
// access key for the IAM identities). All headers with the x-amz- prefix,
// including x-amz-copy-source , must be signed. For more information, see REST
// Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html)
// . Directory buckets - You must use the IAM credentials to authenticate and
// authorize your access to the CopyObject API operation, instead of using the
// temporary security credentials through the CreateSession API operation. Amazon
// Web Services CLI or SDKs handles authentication and authorization on your
// behalf. Permissions You must have read access to the source object and write
// access to the destination bucket.
// - General purpose bucket permissions - You must have permissions in an IAM
// policy based on the source and destination bucket types in a CopyObject
// operation.
// - If the source object is in a general purpose bucket, you must have
// s3:GetObject permission to read the source object that is being copied.
// - If the destination bucket is a general purpose bucket, you must have
// s3:PutObject permission to write the object copy to the destination bucket.
// - Directory bucket permissions - You must have permissions in a bucket policy
// or an IAM identity-based policy based on the source and destination bucket types
// in a CopyObject operation.
// - If the source object that you want to copy is in a directory bucket, you
// must have the s3express:CreateSession permission in the Action element of a
// policy to read the object. By default, the session is in the ReadWrite mode.
// If you want to restrict the access, you can explicitly set the
// s3express:SessionMode condition key to ReadOnly on the copy source bucket.
// - If the copy destination is a directory bucket, you must have the
// s3express:CreateSession permission in the Action element of a policy to write
// the object to the destination. The s3express:SessionMode condition key can't
// be set to ReadOnly on the copy destination bucket. For example policies, see
// Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html)
// and Amazon Web Services Identity and Access Management (IAM) identity-based
// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html)
// in the Amazon S3 User Guide.
//
// If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
// headers are present in the request and evaluate as follows, Amazon S3 returns
// 200 OK and copies the data:
// - x-amz-copy-source-if-match condition evaluates to true
// - x-amz-copy-source-if-unmodified-since condition evaluates to false
// Response and special errors When the request is an HTTP 1.1 request, the
// response is chunk encoded. When the request is not an HTTP 1.1 request, the
// response would not contain the Content-Length . You always need to read the
// entire response body to check if the copy succeeds; the response is sent this
// way to keep the connection alive while Amazon S3 copies the data.
// - If the copy is successful, you receive a response with information about
// the copied object.
// - A copy request might return an error when Amazon S3 receives the copy
// request or while Amazon S3 is copying the files. A 200 OK response can contain
// either a success or an error.
// - If the error occurs before the copy action starts, you receive a standard
// Amazon S3 error.
// - If the error occurs during the copy operation, the error response is
// embedded in the 200 OK response. For example, in a cross-region copy, you may
// encounter throttling and receive a 200 OK response. For more information, see
// Resolve the Error 200 response when copying objects to Amazon S3 . The 200 OK
// status code means the copy was accepted, but it doesn't mean the copy is
// complete. Another example is when you disconnect from Amazon S3 before the copy
// is complete, Amazon S3 might cancel the copy and you may receive a 200 OK
// response. You must stay connected to Amazon S3 until the entire response is
// successfully received and processed. If you call this API operation directly,
// make sure to design your application to parse the content of the response and
// handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this
// condition. The SDKs detect the embedded error and apply error handling per your
// configuration settings (including automatically retrying the request as
// appropriate). If the condition persists, the SDKs throw an exception (or, for
// the SDKs that don't use exceptions, they return an error).
//
// If both the x-amz-copy-source-if-none-match and
// x-amz-copy-source-if-modified-since headers are present in the request and
// evaluate as follows, Amazon S3 returns the 412 Precondition Failed response
// code:
// - x-amz-copy-source-if-none-match condition evaluates to false
// - x-amz-copy-source-if-modified-since condition evaluates to true
//
// All headers with the x-amz- prefix, including x-amz-copy-source , must be
// signed. Server-side encryption Amazon S3 automatically encrypts all new objects
// that are copied to an S3 bucket. When copying an object, if you don't specify
// encryption information in your copy request, the encryption setting of the
// target object is set to the default encryption configuration of the destination
// bucket. By default, all buckets have a base level of encryption configuration
// that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the
// destination bucket has a default encryption configuration that uses server-side
// encryption with Key Management Service (KMS) keys (SSE-KMS), dual-layer
// server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), or
// server-side encryption with customer-provided encryption keys (SSE-C), Amazon S3
// uses the corresponding KMS key, or a customer-provided key to encrypt the target
// object copy. When you perform a CopyObject operation, if you want to use a
// different type of encryption setting for the target object, you can use other
// appropriate encryption-related headers to encrypt the target object with a KMS
// key, an Amazon S3 managed key, or a customer-provided key. With server-side
// encryption, Amazon S3 encrypts your data as it writes your data to disks in its
// data centers and decrypts the data when you access it. If the encryption setting
// in your request is different from the default encryption configuration of the
// destination bucket, the encryption setting in your request takes precedence. If
// the source object for the copy is stored in Amazon S3 using SSE-C, you must
// provide the necessary encryption information in your request so that Amazon S3
// can decrypt the object for copying. For more information about server-side
// encryption, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
// . If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the
// object. For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html)
// in the Amazon S3 User Guide. Access Control List (ACL)-Specific Request Headers
// When copying an object, you can optionally use headers to grant ACL-based
// permissions. By default, all objects are private. Only the owner has full access
// control. When adding a new object, you can grant permissions to individual
// Amazon Web Services accounts or to predefined groups that are defined by Amazon
// S3. These permissions are then added to the ACL on the object. For more
// information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
// and Managing ACLs Using the REST API (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html)
// . If the bucket that you're copying objects to uses the bucket owner enforced
// setting for S3 Object Ownership, ACLs are disabled and no longer affect
// permissions. Buckets that use this setting only accept PUT requests that don't
// specify an ACL or PUT requests that specify bucket owner full control ACLs,
// such as the bucket-owner-full-control canned ACL or an equivalent form of this
// ACL expressed in the XML format. For more information, see Controlling
// ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
// in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced
// setting for Object Ownership, all objects written to the bucket by any account
// will be owned by the bucket owner. Checksums When copying an object, if it has a
// checksum, that checksum will be copied to the new object by default. When you
// copy the object over, you can optionally specify a different checksum algorithm
// to use with the x-amz-checksum-algorithm header. Storage Class Options You can
// use the CopyObject action to change the storage class of an object that is
// already stored in Amazon S3 by using the StorageClass parameter. For more
// information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
// in the Amazon S3 User Guide. If the source object's storage class is GLACIER or
// DEEP_ARCHIVE, or the object's storage class is INTELLIGENT_TIERING and it's S3
// Intelligent-Tiering access tier (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition)
// is Archive Access or Deep Archive Access, you must restore a copy of this object
// before you can use it as a source object for the copy operation. For more
// information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html)
// . For more information, see Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html)
// . Versioning By default, x-amz-copy-source header identifies the current
// version of an object to copy. If the current version is a delete marker, Amazon
// S3 behaves as if the object was deleted. To copy a different version, use the
// versionId subresource. If you enable versioning on the target bucket, Amazon S3
// generates a unique version ID for the object being copied. This version ID is
// different from the version ID of the source object. Amazon S3 returns the
// version ID of the copied object in the x-amz-version-id response header in the
// response. If you do not enable versioning or suspend it on the target bucket,
// the version ID that Amazon S3 generates is always null. The following operations
// are related to CopyObject :
// Charge The copy request charge is based on the storage class and Region that
// you specify for the destination object. The request can also result in a data
// retrieval charge for the source if the source storage class bills for data
// retrieval. For pricing information, see Amazon S3 pricing (http://aws.amazon.com/s3/pricing/)
// . HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are
// related to CopyObject :
// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
// - GetObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns ...func(*Options)) (*CopyObjectOutput, error) {
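
As a brief aside (not part of the vendored code), a minimal sketch of the copy described above; the bucket and key names are placeholders, and the CopySource value is URL-encoded as the documentation requires:

package main

import (
	"context"
	"log"
	"net/url"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// CopySource is "source-bucket/source-key", URL-encoded; append
	// "?versionId=..." to copy a specific version instead of the latest one.
	sourceBucket, sourceKey := "example-source-bucket", "reports/january.pdf"
	copySource := sourceBucket + "/" + url.PathEscape(sourceKey)

	out, err := client.CopyObject(context.Background(), &s3.CopyObjectInput{
		Bucket:     aws.String("example-destination-bucket"),
		Key:        aws.String("reports/january-copy.pdf"),
		CopySource: aws.String(copySource),
	})
	if err != nil {
		// Remember the caveat above: an error embedded in a 200 OK response is
		// surfaced by the SDK as err.
		log.Fatal(err)
	}
	if out.CopyObjectResult != nil {
		log.Printf("copied, ETag: %s", aws.ToString(out.CopyObjectResult.ETag))
	}
}
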
@ -168,16 +123,26 @@ func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns
type CopyObjectInput struct {
// The name of the destination bucket. When using this action with an access
// point, you must direct requests to the access point hostname. The access point
// hostname takes the form
// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
// action with an access point through the Amazon Web Services SDKs, you provide
// the access point ARN in place of the bucket name. For more information about
// access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide. When you use this action with Amazon S3 on
// Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on
// Outposts hostname takes the form
// The name of the destination bucket. Directory buckets - When you use this
// operation with a directory bucket, you must use virtual-hosted-style requests in
// the format Bucket_name.s3express-az_id.region.amazonaws.com . Path-style
// requests are not supported. Directory bucket names must be unique in the chosen
// Availability Zone. Bucket names must follow the format
// bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3
// ). For information about bucket naming restrictions, see Directory bucket
// naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide. Access points - When you use this action with an
// access point, you must provide the alias of the access point in place of the
// bucket name or specify the access point ARN. When using the access point ARN,
// you must direct requests to the access point hostname. The access point hostname
// takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide. Access points and Object Lambda access points are
// not supported by directory buckets. S3 on Outposts - When you use this action
// with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts
// hostname. The S3 on Outposts hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
@ -187,35 +152,52 @@ type CopyObjectInput struct {
// This member is required.
Bucket *string
// Specifies the source object for the copy operation. You specify the value in
// one of two formats, depending on whether you want to access the source object
// through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html)
// Specifies the source object for the copy operation. The source object can be up
// to 5 GB. If the source object is an object that was uploaded by using a
// multipart upload, the object copy will be a single part object after the source
// object is copied to the destination bucket. You specify the value of the copy
// source in one of two formats, depending on whether you want to access the source
// object through an access point (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html)
// :
// - For objects not accessed through an access point, specify the name of the
// source bucket and the key of the source object, separated by a slash (/). For
// example, to copy the object reports/january.pdf from the bucket
// awsexamplebucket , use awsexamplebucket/reports/january.pdf . The value must
// be URL-encoded.
// example, to copy the object reports/january.pdf from the general purpose
// bucket awsexamplebucket , use awsexamplebucket/reports/january.pdf . The value
// must be URL-encoded. To copy the object reports/january.pdf from the directory
// bucket awsexamplebucket--use1-az5--x-s3 , use
// awsexamplebucket--use1-az5--x-s3/reports/january.pdf . The value must be
// URL-encoded.
// - For objects accessed through access points, specify the Amazon Resource
// Name (ARN) of the object as accessed through the access point, in the format
// arn:aws:s3:::accesspoint//object/ . For example, to copy the object
// reports/january.pdf through access point my-access-point owned by account
// 123456789012 in Region us-west-2 , use the URL encoding of
// arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf
// . The value must be URL encoded. Amazon S3 supports copy operations using access
// points only when the source and destination buckets are in the same Amazon Web
// Services Region. Alternatively, for objects accessed through Amazon S3 on
// Outposts, specify the ARN of the object as accessed in the format
// arn:aws:s3-outposts:::outpost//object/ . For example, to copy the object
// reports/january.pdf through outpost my-outpost owned by account 123456789012
// in Region us-west-2 , use the URL encoding of
// . The value must be URL encoded.
// - Amazon S3 supports copy operations using Access points only when the source
// and destination buckets are in the same Amazon Web Services Region.
// - Access points are not supported by directory buckets. Alternatively, for
// objects accessed through Amazon S3 on Outposts, specify the ARN of the object as
// accessed in the format arn:aws:s3-outposts:::outpost//object/ . For example,
// to copy the object reports/january.pdf through outpost my-outpost owned by
// account 123456789012 in Region us-west-2 , use the URL encoding of
// arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf
// . The value must be URL-encoded.
// To copy a specific version of an object, append ?versionId= to the value (for
// example,
// If your source bucket versioning is enabled, the x-amz-copy-source header by
// default identifies the current version of an object to copy. If the current
// version is a delete marker, Amazon S3 behaves as if the object was deleted. To
// copy a different version, use the versionId query parameter. Specifically,
// append ?versionId= to the value (for example,
// awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893
// ). If you don't specify a version ID, Amazon S3 copies the latest version of the
// source object.
// source object. If you enable versioning on the destination bucket, Amazon S3
// generates a unique version ID for the copied object. This version ID is
// different from the version ID of the source object. Amazon S3 returns the
// version ID of the copied object in the x-amz-version-id response header in the
// response. If you do not enable versioning or suspend it on the destination
// bucket, the version ID that Amazon S3 generates in the x-amz-version-id
// response header is always null. Directory buckets - S3 Versioning isn't enabled
// and supported for directory buckets.
//
// This member is required.
CopySource *string
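// Illustrative sketch, not part of the vendored SDK source: one way a caller
// could build the CopySource value described above, assuming the aws, s3,
// net/url, and strings imports. The bucket and key names are hypothetical, and
// the copySourceOf helper exists only for this example.
//
//   // copySourceOf URL-encodes each key segment while keeping the slashes.
//   func copySourceOf(bucket, key string) string {
//       segs := strings.Split(key, "/")
//       for i, s := range segs {
//           segs[i] = url.PathEscape(s)
//       }
//       return bucket + "/" + strings.Join(segs, "/")
//   }
//
//   input := &s3.CopyObjectInput{
//       Bucket:     aws.String("destination-bucket"),
//       Key:        aws.String("reports/january-copy.pdf"),
//       CopySource: aws.String(copySourceOf("awsexamplebucket", "reports/january.pdf")),
//       // Append "?versionId=<id>" to CopySource to copy a specific version.
//   }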
@ -225,138 +207,230 @@ type CopyObjectInput struct {
// This member is required.
Key *string
// The canned ACL to apply to the object. This action is not supported by Amazon
// S3 on Outposts.
// The canned access control list (ACL) to apply to the object. When you copy an
// object, the ACL metadata is not preserved and is set to private by default.
// Only the owner has full access control. To override the default ACL setting,
// specify a new ACL when you generate a copy request. For more information, see
// Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html)
// . If the destination bucket that you're copying objects to uses the bucket owner
// enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect
// permissions. Buckets that use this setting only accept PUT requests that don't
// specify an ACL or PUT requests that specify bucket owner full control ACLs,
// such as the bucket-owner-full-control canned ACL or an equivalent form of this
// ACL expressed in the XML format. For more information, see Controlling
// ownership of objects and disabling ACLs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
// in the Amazon S3 User Guide.
// - If your destination bucket uses the bucket owner enforced setting for
// Object Ownership, all objects written to the bucket by any account will be owned
// by the bucket owner.
// - This functionality is not supported for directory buckets.
// - This functionality is not supported for Amazon S3 on Outposts.
ACL types.ObjectCannedACL
// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
// with server-side encryption using Key Management Service (KMS) keys (SSE-KMS).
// If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object.
// Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object
// encryption with SSE-KMS. Specifying this header with a COPY action doesn't
// affect bucket-level settings for S3 Bucket Key.
BucketKeyEnabled bool
// affect bucket-level settings for S3 Bucket Key. For more information, see
// Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html)
// in the Amazon S3 User Guide. This functionality is not supported when the
// destination bucket is a directory bucket.
BucketKeyEnabled *bool
// Specifies caching behavior along the request/reply chain.
// Specifies the caching behavior along the request/reply chain.
CacheControl *string
// Indicates the algorithm you want Amazon S3 to use to create the checksum for
// the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
// Indicates the algorithm that you want Amazon S3 to use to create the checksum
// for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide. When you copy an object, if the source object has a
// checksum, that checksum value will be copied to the new object by default. If
// the CopyObject request does not include this x-amz-checksum-algorithm header,
// the checksum algorithm will be copied from the source object to the destination
// object (if it's present on the source object). You can optionally specify a
// different checksum algorithm to use with the x-amz-checksum-algorithm header.
// Unrecognized or unsupported values will respond with the HTTP status code 400
// Bad Request . For directory buckets, when you use Amazon Web Services SDKs,
// CRC32 is the default checksum algorithm that's used for performance.
ChecksumAlgorithm types.ChecksumAlgorithm
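// Minimal sketch, assuming the input value from the earlier sketch and the
// github.com/aws/aws-sdk-go-v2/service/s3/types package: leaving this field
// unset copies the source object's checksum algorithm, while setting it
// overrides the algorithm used for the copy.
//
//   input.ChecksumAlgorithm = types.ChecksumAlgorithmSha256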
// Specifies presentational information for the object.
// Specifies presentational information for the object. Indicates whether an
// object should be displayed in a web browser or downloaded as a file. It allows
// specifying the desired filename for the downloaded file.
ContentDisposition *string
// Specifies what content encodings have been applied to the object and thus what
// decoding mechanisms must be applied to obtain the media-type referenced by the
// Content-Type header field.
// Content-Type header field. For directory buckets, only the aws-chunked value is
// supported in this header field.
ContentEncoding *string
// The language the content is in.
ContentLanguage *string
// A standard MIME type describing the format of the object data.
// A standard MIME type that describes the format of the object data.
ContentType *string
// Copies the object if its entity tag (ETag) matches the specified tag.
// Copies the object if its entity tag (ETag) matches the specified tag. If both
// the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
// headers are present in the request and evaluate as follows, Amazon S3 returns
// 200 OK and copies the data:
// - x-amz-copy-source-if-match condition evaluates to true
// - x-amz-copy-source-if-unmodified-since condition evaluates to false
CopySourceIfMatch *string
// Copies the object if it has been modified since the specified time.
// Copies the object if it has been modified since the specified time. If both the
// x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers
// are present in the request and evaluate as follows, Amazon S3 returns the 412
// Precondition Failed response code:
// - x-amz-copy-source-if-none-match condition evaluates to false
// - x-amz-copy-source-if-modified-since condition evaluates to true
CopySourceIfModifiedSince *time.Time
// Copies the object if its entity tag (ETag) is different than the specified ETag.
// Copies the object if its entity tag (ETag) is different than the specified
// ETag. If both the x-amz-copy-source-if-none-match and
// x-amz-copy-source-if-modified-since headers are present in the request and
// evaluate as follows, Amazon S3 returns the 412 Precondition Failed response
// code:
// - x-amz-copy-source-if-none-match condition evaluates to false
// - x-amz-copy-source-if-modified-since condition evaluates to true
CopySourceIfNoneMatch *string
// Copies the object if it hasn't been modified since the specified time.
// Copies the object if it hasn't been modified since the specified time. If both
// the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
// headers are present in the request and evaluate as follows, Amazon S3 returns
// 200 OK and copies the data:
// - x-amz-copy-source-if-match condition evaluates to true
// - x-amz-copy-source-if-unmodified-since condition evaluates to false
CopySourceIfUnmodifiedSince *time.Time
// Specifies the algorithm to use when decrypting the source object (for example,
// AES256).
// AES256 ). If the source object for the copy is stored in Amazon S3 using SSE-C,
// you must provide the necessary encryption information in your request so that
// Amazon S3 can decrypt the object for copying. This functionality is not
// supported when the source object is in a directory bucket.
CopySourceSSECustomerAlgorithm *string
// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
// the source object. The encryption key provided in this header must be one that
// was used when the source object was created.
// the source object. The encryption key provided in this header must be the same
// one that was used when the source object was created. If the source object for
// the copy is stored in Amazon S3 using SSE-C, you must provide the necessary
// encryption information in your request so that Amazon S3 can decrypt the object
// for copying. This functionality is not supported when the source object is in a
// directory bucket.
CopySourceSSECustomerKey *string
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
// encryption key was transmitted without error.
// encryption key was transmitted without error. If the source object for the copy
// is stored in Amazon S3 using SSE-C, you must provide the necessary encryption
// information in your request so that Amazon S3 can decrypt the object for
// copying. This functionality is not supported when the source object is in a
// directory bucket.
CopySourceSSECustomerKeyMD5 *string
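// Hedged sketch of supplying the three SSE-C copy-source headers together, as
// required above when the source object is SSE-C encrypted. The key bytes are
// a placeholder; assumes the bytes, crypto/md5, and encoding/base64 imports and
// the input value from the earlier sketch.
//
//   key := bytes.Repeat([]byte{0x2a}, 32) // placeholder 256-bit key material
//   sum := md5.Sum(key)
//   input.CopySourceSSECustomerAlgorithm = aws.String("AES256")
//   input.CopySourceSSECustomerKey = aws.String(base64.StdEncoding.EncodeToString(key))
//   input.CopySourceSSECustomerKeyMD5 = aws.String(base64.StdEncoding.EncodeToString(sum[:]))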
// The account ID of the expected destination bucket owner. If the destination
// bucket is owned by a different account, the request fails with the HTTP status
// code 403 Forbidden (access denied).
// The account ID of the expected destination bucket owner. If the account ID that
// you provide does not match the actual owner of the destination bucket, the
// request fails with the HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
// The account ID of the expected source bucket owner. If the source bucket is
// owned by a different account, the request fails with the HTTP status code 403
// Forbidden (access denied).
// The account ID of the expected source bucket owner. If the account ID that you
// provide does not match the actual owner of the source bucket, the request fails
// with the HTTP status code 403 Forbidden (access denied).
ExpectedSourceBucketOwner *string
// The date and time at which the object is no longer cacheable.
Expires *time.Time
// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. This
// action is not supported by Amazon S3 on Outposts.
// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
// - This functionality is not supported for directory buckets.
// - This functionality is not supported for Amazon S3 on Outposts.
GrantFullControl *string
// Allows grantee to read the object data and its metadata. This action is not
// supported by Amazon S3 on Outposts.
// Allows grantee to read the object data and its metadata.
// - This functionality is not supported for directory buckets.
// - This functionality is not supported for Amazon S3 on Outposts.
GrantRead *string
// Allows grantee to read the object ACL. This action is not supported by Amazon
// S3 on Outposts.
// Allows grantee to read the object ACL.
// - This functionality is not supported for directory buckets.
// - This functionality is not supported for Amazon S3 on Outposts.
GrantReadACP *string
// Allows grantee to write the ACL for the applicable object. This action is not
// supported by Amazon S3 on Outposts.
// Allows grantee to write the ACL for the applicable object.
// - This functionality is not supported for directory buckets.
// - This functionality is not supported for Amazon S3 on Outposts.
GrantWriteACP *string
// A map of metadata to store with the object in S3.
Metadata map[string]string
// Specifies whether the metadata is copied from the source object or replaced
// with metadata provided in the request.
// with metadata that's provided in the request. When copying an object, you can
// preserve all metadata (the default) or specify new metadata. If this header
// isn't specified, COPY is the default behavior. General purpose bucket - For
// general purpose buckets, when you grant permissions, you can use the
// s3:x-amz-metadata-directive condition key to enforce certain metadata behavior
// when objects are uploaded. For more information, see Amazon S3 condition key
// examples (https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html)
// in the Amazon S3 User Guide. x-amz-website-redirect-location is unique to each
// object and is not copied when using the x-amz-metadata-directive header. To
// copy the value, you must specify x-amz-website-redirect-location in the request
// header.
MetadataDirective types.MetadataDirective
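// Illustrative sketch (the metadata key is hypothetical): replacing metadata on
// the object copy and re-supplying the website redirect, which is never copied,
// as noted above.
//
//   input.MetadataDirective = types.MetadataDirectiveReplace
//   input.Metadata = map[string]string{"project": "reports"}
//   input.WebsiteRedirectLocation = aws.String("/index.html")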
// Specifies whether you want to apply a legal hold to the copied object.
// Specifies whether you want to apply a legal hold to the object copy. This
// functionality is not supported for directory buckets.
ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
// The Object Lock mode that you want to apply to the copied object.
// The Object Lock mode that you want to apply to the object copy. This
// functionality is not supported for directory buckets.
ObjectLockMode types.ObjectLockMode
// The date and time when you want the copied object's Object Lock to expire.
// The date and time when you want the Object Lock of the object copy to expire.
// This functionality is not supported for directory buckets.
ObjectLockRetainUntilDate *time.Time
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
// in the Amazon S3 User Guide. This functionality is not supported for directory
// buckets.
RequestPayer types.RequestPayer
// Specifies the algorithm to use when encrypting the object (for example,
// AES256).
// Specifies the algorithm to use when encrypting the object (for example, AES256
// ). When you perform a CopyObject operation, if you want to use a different type
// of encryption setting for the target object, you can specify appropriate
// encryption-related headers to encrypt the target object with an Amazon S3
// managed key, a KMS key, or a customer-provided key. If the encryption setting in
// your request is different from the default encryption configuration of the
// destination bucket, the encryption setting in your request takes precedence.
// This functionality is not supported when the destination bucket is a directory
// bucket.
SSECustomerAlgorithm *string
// Specifies the customer-provided encryption key for Amazon S3 to use in
// encrypting data. This value is used to store the object and then it is
// discarded; Amazon S3 does not store the encryption key. The key must be
// discarded. Amazon S3 does not store the encryption key. The key must be
// appropriate for use with the algorithm specified in the
// x-amz-server-side-encryption-customer-algorithm header.
// x-amz-server-side-encryption-customer-algorithm header. This functionality is
// not supported when the destination bucket is a directory bucket.
SSECustomerKey *string
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
// encryption key was transmitted without error.
// encryption key was transmitted without error. This functionality is not
// supported when the destination bucket is a directory bucket.
SSECustomerKeyMD5 *string
// Specifies the Amazon Web Services KMS Encryption Context to use for object
// encryption. The value of this header is a base64-encoded UTF-8 string holding
// JSON with the encryption context key-value pairs.
// JSON with the encryption context key-value pairs. This value must be explicitly
// added to specify encryption context for CopyObject requests. This functionality
// is not supported when the destination bucket is a directory bucket.
SSEKMSEncryptionContext *string
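// Hedged sketch of requesting SSE-KMS for the object copy; the KMS key ARN and
// encryption context values are placeholders, and the context is
// base64-encoded JSON as described above.
//
//   input.ServerSideEncryption = types.ServerSideEncryptionAwsKms
//   input.SSEKMSKeyId = aws.String("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID")
//   input.SSEKMSEncryptionContext = aws.String(base64.StdEncoding.EncodeToString([]byte(`{"department":"reports"}`)))
//   input.BucketKeyEnabled = aws.Bool(true)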
// Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object
@ -364,35 +438,132 @@ type CopyObjectInput struct {
// they're not made via SSL or using SigV4. For information about configuring any
// of the officially supported Amazon Web Services SDKs and Amazon Web Services
// CLI, see Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
// in the Amazon S3 User Guide.
// in the Amazon S3 User Guide. This functionality is not supported when the
// destination bucket is a directory bucket.
SSEKMSKeyId *string
// The server-side encryption algorithm used when storing this object in Amazon S3
// (for example, AES256 , aws:kms , aws:kms:dsse ).
// (for example, AES256 , aws:kms , aws:kms:dsse ). Unrecognized or unsupported
// values won't write a destination object and will receive a 400 Bad Request
// response. Amazon S3 automatically encrypts all new objects that are copied to an
// S3 bucket. When copying an object, if you don't specify encryption information
// in your copy request, the encryption setting of the target object is set to the
// default encryption configuration of the destination bucket. By default, all
// buckets have a base level of encryption configuration that uses server-side
// encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a
// default encryption configuration that uses server-side encryption with Key
// Management Service (KMS) keys (SSE-KMS), dual-layer server-side encryption with
// Amazon Web Services KMS keys (DSSE-KMS), or server-side encryption with
// customer-provided encryption keys (SSE-C), Amazon S3 uses the corresponding KMS
// key, or a customer-provided key to encrypt the target object copy. When you
// perform a CopyObject operation, if you want to use a different type of
// encryption setting for the target object, you can specify appropriate
// encryption-related headers to encrypt the target object with an Amazon S3
// managed key, a KMS key, or a customer-provided key. If the encryption setting in
// your request is different from the default encryption configuration of the
// destination bucket, the encryption setting in your request takes precedence.
// With server-side encryption, Amazon S3 encrypts your data as it writes your data
// to disks in its data centers and decrypts the data when you access it. For more
// information about server-side encryption, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
// in the Amazon S3 User Guide. For directory buckets, only server-side encryption
// with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported.
ServerSideEncryption types.ServerSideEncryption
// If the x-amz-storage-class header is not used, the copied object will be stored
// in the STANDARD Storage Class by default. The STANDARD storage class provides
// high durability and high availability. Depending on performance needs, you can
// specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS
// Storage Class. For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
// specify a different Storage Class.
// - Directory buckets - For directory buckets, only the S3 Express One Zone
// storage class is supported to store newly created objects. Unsupported storage
// class values won't write a destination object and will respond with the HTTP
// status code 400 Bad Request .
// - Amazon S3 on Outposts - S3 on Outposts only uses the OUTPOSTS Storage Class.
// You can use the CopyObject action to change the storage class of an object that
// is already stored in Amazon S3 by using the x-amz-storage-class header. For
// more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
// in the Amazon S3 User Guide. Before using an object as a source object for the
// copy operation, you must restore a copy of it if it meets any of the following
// conditions:
// - The storage class of the source object is GLACIER or DEEP_ARCHIVE .
// - The storage class of the source object is INTELLIGENT_TIERING and its S3
// Intelligent-Tiering access tier (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition)
// is Archive Access or Deep Archive Access .
// For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html)
// and Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html)
// in the Amazon S3 User Guide.
StorageClass types.StorageClass
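// Illustrative sketch of the use case mentioned above: copying an object onto
// itself to change its storage class. Assumes a client and ctx as in the later
// sketch; the bucket and key names are hypothetical.
//
//   _, err := client.CopyObject(ctx, &s3.CopyObjectInput{
//       Bucket:            aws.String("awsexamplebucket"),
//       Key:               aws.String("reports/january.pdf"),
//       CopySource:        aws.String("awsexamplebucket/reports/january.pdf"),
//       MetadataDirective: types.MetadataDirectiveCopy,
//       StorageClass:      types.StorageClassStandardIa,
//   })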
// The tag-set for the destination object. This value must be used in
// conjunction with the TaggingDirective . The tag-set must be encoded as URL Query
// parameters.
// The tag-set for the object copy in the destination bucket. This value must be
// used in conjunction with the x-amz-tagging-directive if you choose REPLACE for
// the x-amz-tagging-directive . If you choose COPY for the x-amz-tagging-directive
// , you don't need to set the x-amz-tagging header, because the tag-set will be
// copied from the source object directly. The tag-set must be encoded as URL Query
// parameters. The default value is the empty value. Directory buckets - For
// directory buckets in a CopyObject operation, only the empty tag-set is
// supported. Any requests that attempt to write non-empty tags into directory
// buckets will receive a 501 Not Implemented status code. When the destination
// bucket is a directory bucket, you will receive a 501 Not Implemented response
// in any of the following situations:
// - When you attempt to COPY the tag-set from an S3 source object that has
// non-empty tags.
// - When you attempt to REPLACE the tag-set of a source object and set a
// non-empty value to x-amz-tagging .
// - When you don't set the x-amz-tagging-directive header and the source object
// has non-empty tags. This is because the default value of
// x-amz-tagging-directive is COPY .
// Because only the empty tag-set is supported for directory buckets in a
// CopyObject operation, the following situations are allowed:
// - When you attempt to COPY the tag-set from a directory bucket source object
// that has no tags to a general purpose bucket. It copies an empty tag-set to the
// destination object.
// - When you attempt to REPLACE the tag-set of a directory bucket source object
// and set the x-amz-tagging value of the directory bucket destination object to
// empty.
// - When you attempt to REPLACE the tag-set of a general purpose bucket source
// object that has non-empty tags and set the x-amz-tagging value of the
// directory bucket destination object to empty.
// - When you attempt to REPLACE the tag-set of a directory bucket source object
// and don't set the x-amz-tagging value of the directory bucket destination
// object. This is because the default value of x-amz-tagging is the empty value.
Tagging *string
// Specifies whether the object tag-set are copied from the source object or
// replaced with tag-set provided in the request.
// Specifies whether the object tag-set is copied from the source object or
// replaced with the tag-set that's provided in the request. The default value is
// COPY . Directory buckets - For directory buckets in a CopyObject operation,
// only the empty tag-set is supported. Any requests that attempt to write
// non-empty tags into directory buckets will receive a 501 Not Implemented status
// code. When the destination bucket is a directory bucket, you will receive a 501
// Not Implemented response in any of the following situations:
// - When you attempt to COPY the tag-set from an S3 source object that has
// non-empty tags.
// - When you attempt to REPLACE the tag-set of a source object and set a
// non-empty value to x-amz-tagging .
// - When you don't set the x-amz-tagging-directive header and the source object
// has non-empty tags. This is because the default value of
// x-amz-tagging-directive is COPY .
// Because only the empty tag-set is supported for directory buckets in a
// CopyObject operation, the following situations are allowed:
// - When you attempt to COPY the tag-set from a directory bucket source object
// that has no tags to a general purpose bucket. It copies an empty tag-set to the
// destination object.
// - When you attempt to REPLACE the tag-set of a directory bucket source object
// and set the x-amz-tagging value of the directory bucket destination object to
// empty.
// - When you attempt to REPLACE the tag-set of a general purpose bucket source
// object that has non-empty tags and set the x-amz-tagging value of the
// directory bucket destination object to empty.
// - When you attempt to REPLACE the tag-set of a directory bucket source object
// and don't set the x-amz-tagging value of the directory bucket destination
// object. This is because the default value of x-amz-tagging is the empty value.
TaggingDirective types.TaggingDirective
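// Sketch of replacing the tag-set on the copy with a URL-query-encoded value,
// per the header format described above; the tag keys and values are
// hypothetical, and the net/url import is assumed.
//
//   tags := url.Values{}
//   tags.Set("project", "reports")
//   tags.Set("owner", "data-team")
//   input.Tagging = aws.String(tags.Encode())
//   input.TaggingDirective = types.TaggingDirectiveReplace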
// If the bucket is configured as a website, redirects requests for this object to
// another object in the same bucket or to an external URL. Amazon S3 stores the
// value of this header in the object metadata. This value is unique to each object
// and is not copied when using the x-amz-metadata-directive header. Instead, you
// may opt to provide this header in combination with the directive.
// If the destination bucket is configured as a website, redirects requests for
// this object copy to another object in the same bucket or to an external URL.
// Amazon S3 stores the value of this header in the object metadata. This value is
// unique to each object and is not copied when using the x-amz-metadata-directive
// header. Instead, you may opt to provide this header in combination with the
// x-amz-metadata-directive header. This functionality is not supported for
// directory buckets.
WebsiteRedirectLocation *string
noSmithyDocumentSerde
@ -400,52 +571,62 @@ type CopyObjectInput struct {
func (in *CopyObjectInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.DisableS3ExpressSessionAuth = ptr.Bool(true)
}
type CopyObjectOutput struct {
// Indicates whether the copied object uses an S3 Bucket Key for server-side
// encryption with Key Management Service (KMS) keys (SSE-KMS).
BucketKeyEnabled bool
// encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality
// is not supported for directory buckets.
BucketKeyEnabled *bool
// Container for all response elements.
CopyObjectResult *types.CopyObjectResult
// Version of the copied object in the destination bucket.
// Version ID of the source object that was copied. This functionality is not
// supported when the source object is in a directory bucket.
CopySourceVersionId *string
// If the object expiration is configured, the response includes this header.
// If the object expiration is configured, the response includes this header. This
// functionality is not supported for directory buckets.
Expiration *string
// If present, indicates that the requester was successfully charged for the
// request.
// request. This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// If server-side encryption with a customer-provided encryption key was
// requested, the response will include this header confirming the encryption
// algorithm used.
// requested, the response will include this header to confirm the encryption
// algorithm that's used. This functionality is not supported for directory
// buckets.
SSECustomerAlgorithm *string
// If server-side encryption with a customer-provided encryption key was
// requested, the response will include this header to provide round-trip message
// integrity verification of the customer-provided encryption key.
// requested, the response will include this header to provide the round-trip
// message integrity verification of the customer-provided encryption key. This
// functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string
// If present, specifies the Amazon Web Services KMS Encryption Context to use for
// If present, indicates the Amazon Web Services KMS Encryption Context to use for
// object encryption. The value of this header is a base64-encoded UTF-8 string
// holding JSON with the encryption context key-value pairs.
// holding JSON with the encryption context key-value pairs. This functionality is
// not supported for directory buckets.
SSEKMSEncryptionContext *string
// If present, specifies the ID of the Key Management Service (KMS) symmetric
// encryption customer managed key that was used for the object.
// If present, indicates the ID of the Key Management Service (KMS) symmetric
// encryption customer managed key that was used for the object. This functionality
// is not supported for directory buckets.
SSEKMSKeyId *string
// The server-side encryption algorithm used when storing this object in Amazon S3
// (for example, AES256 , aws:kms , aws:kms:dsse ).
// The server-side encryption algorithm used when you store this object in Amazon
// S3 (for example, AES256 , aws:kms , aws:kms:dsse ). For directory buckets, only
// server-side encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is
// supported.
ServerSideEncryption types.ServerSideEncryption
// Version ID of the newly created copy.
// Version ID of the newly created copy. This functionality is not supported for
// directory buckets.
VersionId *string
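// End-to-end sketch (the config loading and logging are illustrative and not
// part of this file): issuing the copy built in the earlier sketches and
// reading the output fields documented above.
//
//   cfg, err := config.LoadDefaultConfig(ctx) // github.com/aws/aws-sdk-go-v2/config
//   if err != nil {
//       log.Fatal(err)
//   }
//   client := s3.NewFromConfig(cfg)
//   out, err := client.CopyObject(ctx, input)
//   if err != nil {
//       log.Fatal(err)
//   }
//   if out.CopyObjectResult != nil && out.CopyObjectResult.ETag != nil {
//       log.Printf("etag: %s", *out.CopyObjectResult.ETag)
//   }
//   if out.VersionId != nil {
//       log.Printf("destination version: %s", *out.VersionId)
//   }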
// Metadata pertaining to the operation's result.
@ -509,6 +690,9 @@ func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, opti
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpCopyObjectValidationMiddleware(stack); err != nil {
return err
}

View file

@ -14,64 +14,80 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Creates a new S3 bucket. To create a bucket, you must register with Amazon S3
// and have a valid Amazon Web Services Access Key ID to authenticate requests.
// This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts
// bucket, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html)
// . Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and
// have a valid Amazon Web Services Access Key ID to authenticate requests.
// Anonymous requests are never allowed to create buckets. By creating the bucket,
// you become the bucket owner. Not every string is an acceptable bucket name. For
// information about bucket naming restrictions, see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html)
// . If you want to create an Amazon S3 on Outposts bucket, see Create Bucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html)
// . By default, the bucket is created in the US East (N. Virginia) Region. You can
// optionally specify a Region in the request body. To constrain the bucket
// creation to a specific Region, you can use LocationConstraint (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketConfiguration.html)
// condition key. You might choose a Region to optimize latency, minimize costs, or
// address regulatory requirements. For example, if you reside in Europe, you will
// probably find it advantageous to create buckets in the Europe (Ireland) Region.
// For more information, see Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
// . If you send your create bucket request to the s3.amazonaws.com endpoint, the
// request goes to the us-east-1 Region. Accordingly, the signature calculations
// in Signature Version 4 must use us-east-1 as the Region, even if the location
// constraint in the request specifies another Region where the bucket is to be
// created. If you create a bucket in a Region other than US East (N. Virginia),
// your application must be able to handle 307 redirect. For more information, see
// Virtual hosting of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html)
// . Permissions In addition to s3:CreateBucket , the following permissions are
// required when your CreateBucket request includes specific headers:
// - Access control lists (ACLs) - If your CreateBucket request specifies access
// control list (ACL) permissions and the ACL is public-read, public-read-write,
// authenticated-read, or if you specify access permissions explicitly through any
// other ACL, both s3:CreateBucket and s3:PutBucketAcl permissions are needed. If
// the ACL for the CreateBucket request is private or if the request doesn't
// specify any ACLs, only s3:CreateBucket permission is needed.
// - Object Lock - If ObjectLockEnabledForBucket is set to true in your
// CreateBucket request, s3:PutBucketObjectLockConfiguration and
// s3:PutBucketVersioning permissions are required.
// - S3 Object Ownership - If your CreateBucket request includes the
// x-amz-object-ownership header, then the s3:PutBucketOwnershipControls
// permission is required. By default, ObjectOwnership is set to
// BucketOwnerEnforced and ACLs are disabled. We recommend keeping ACLs disabled,
// except in uncommon use cases where you must control access for each object
// individually. If you want to change the ObjectOwnership setting, you can use
// the x-amz-object-ownership header in your CreateBucket request to set the
// ObjectOwnership setting of your choice. For more information about S3 Object
// Ownership, see Controlling object ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
// you become the bucket owner. There are two types of buckets: general purpose
// buckets and directory buckets. For more information about these bucket types,
// see Creating, configuring, and working with Amazon S3 buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-buckets-s3.html)
// in the Amazon S3 User Guide.
// - General purpose buckets - If you send your CreateBucket request to the
// s3.amazonaws.com global endpoint, the request goes to the us-east-1 Region. So
// the signature calculations in Signature Version 4 must use us-east-1 as the
// Region, even if the location constraint in the request specifies another Region
// where the bucket is to be created. If you create a bucket in a Region other than
// US East (N. Virginia), your application must be able to handle 307 redirect. For
// more information, see Virtual hosting of buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html)
// in the Amazon S3 User Guide.
// - S3 Block Public Access - If your specific use case requires granting public
// access to your S3 resources, you can disable Block Public Access. You can create
// a new bucket with Block Public Access enabled, then separately call the
// DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html)
// API. To use this operation, you must have the s3:PutBucketPublicAccessBlock
// permission. By default, all Block Public Access settings are enabled for new
// buckets. To avoid inadvertent exposure of your resources, we recommend keeping
// the S3 Block Public Access settings enabled. For more information about S3 Block
// Public Access, see Blocking public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
// - Directory buckets - For directory buckets, you must make requests for this
// API operation to the Regional endpoint. These endpoints support path-style
// requests in the format
// https://s3express-control.region_code.amazonaws.com/bucket-name .
// Virtual-hosted-style requests aren't supported. For more information, see
// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
// in the Amazon S3 User Guide.
//
// If your CreateBucket request sets BucketOwnerEnforced for Amazon S3 Object
// Ownership and specifies a bucket ACL that provides access to an external Amazon
// Web Services account, your request fails with a 400 error and returns the
// InvalidBucketAcLWithObjectOwnership error code. For more information, see
// Setting Object Ownership on an existing bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-ownership-existing-bucket.html)
// in the Amazon S3 User Guide. The following operations are related to
// Permissions
// - General purpose bucket permissions - In addition to the s3:CreateBucket
// permission, the following permissions are required in a policy when your
// CreateBucket request includes specific headers:
// - Access control lists (ACLs) - In your CreateBucket request, if you specify
// an access control list (ACL) and set it to public-read , public-read-write ,
// authenticated-read , or if you explicitly specify any other custom ACLs, both
// s3:CreateBucket and s3:PutBucketAcl permissions are required. In your
// CreateBucket request, if you set the ACL to private , or if you don't specify
// any ACLs, only the s3:CreateBucket permission is required.
// - Object Lock - In your CreateBucket request, if you set
// x-amz-bucket-object-lock-enabled to true, the
// s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are
// required.
// - S3 Object Ownership - If your CreateBucket request includes the
// x-amz-object-ownership header, then the s3:PutBucketOwnershipControls
// permission is required. If your CreateBucket request sets BucketOwnerEnforced
// for Amazon S3 Object Ownership and specifies a bucket ACL that provides access
// to an external Amazon Web Services account, your request fails with a 400
// error and returns the InvalidBucketAcLWithObjectOwnership error code. For more
// information, see Setting Object Ownership on an existing bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-ownership-existing-bucket.html)
// in the Amazon S3 User Guide.
// - S3 Block Public Access - If your specific use case requires granting public
// access to your S3 resources, you can disable Block Public Access. Specifically,
// you can create a new bucket with Block Public Access enabled, then separately
// call the DeletePublicAccessBlock (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html)
// API. To use this operation, you must have the s3:PutBucketPublicAccessBlock
// permission. For more information about S3 Block Public Access, see Blocking
// public access to your Amazon S3 storage (https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html)
// in the Amazon S3 User Guide.
// - Directory bucket permissions - You must have the s3express:CreateBucket
// permission in an IAM identity-based policy instead of a bucket policy.
// Cross-account access to this API operation isn't supported. This operation can
// only be performed by the Amazon Web Services account that owns the resource. For
// more information about directory bucket policies and permissions, see Amazon
// Web Services Identity and Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html)
// in the Amazon S3 User Guide. The permissions for ACLs, Object Lock, S3 Object
// Ownership, and S3 Block Public Access are not supported for directory buckets.
// For directory buckets, all Block Public Access settings are enabled at the
// bucket level and S3 Object Ownership is set to Bucket owner enforced (ACLs
// disabled). These settings can't be modified. For more information about
// permissions for creating and working with directory buckets, see Directory
// buckets (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-overview.html)
// in the Amazon S3 User Guide. For more information about supported S3 features
// for directory buckets, see Features of S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-one-zone.html#s3-express-features)
// in the Amazon S3 User Guide.
//
// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
// s3express-control.region.amazonaws.com . The following operations are related to
// CreateBucket :
// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
// - DeleteBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html)
@ -92,37 +108,52 @@ func (c *Client) CreateBucket(ctx context.Context, params *CreateBucketInput, op
type CreateBucketInput struct {
// The name of the bucket to create.
// The name of the bucket to create. General purpose buckets - For information
// about bucket naming restrictions, see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html)
// in the Amazon S3 User Guide. Directory buckets - When you use this operation
// with a directory bucket, you must use path-style requests in the format
// https://s3express-control.region_code.amazonaws.com/bucket-name .
// Virtual-hosted-style requests aren't supported. Directory bucket names must be
// unique in the chosen Availability Zone. Bucket names must also follow the format
// bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3
// ). For information about bucket naming restrictions, see Directory bucket
// naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide
//
// This member is required.
Bucket *string
// The canned ACL to apply to the bucket.
// The canned ACL to apply to the bucket. This functionality is not supported for
// directory buckets.
ACL types.BucketCannedACL
// The configuration information for the bucket.
CreateBucketConfiguration *types.CreateBucketConfiguration
// Allows grantee the read, write, read ACP, and write ACP permissions on the
// bucket.
// bucket. This functionality is not supported for directory buckets.
GrantFullControl *string
// Allows grantee to list the objects in the bucket.
// Allows grantee to list the objects in the bucket. This functionality is not
// supported for directory buckets.
GrantRead *string
// Allows grantee to read the bucket ACL.
// Allows grantee to read the bucket ACL. This functionality is not supported for
// directory buckets.
GrantReadACP *string
// Allows grantee to create new objects in the bucket. For the bucket and object
// owners of existing objects, also allows deletions and overwrites of those
// objects.
// objects. This functionality is not supported for directory buckets.
GrantWrite *string
// Allows grantee to write the ACL for the applicable bucket.
// Allows grantee to write the ACL for the applicable bucket. This functionality
// is not supported for directory buckets.
GrantWriteACP *string
// Specifies whether you want S3 Object Lock to be enabled for the new bucket.
ObjectLockEnabledForBucket bool
// This functionality is not supported for directory buckets.
ObjectLockEnabledForBucket *bool
// The container element for object ownership for a bucket's ownership controls.
// BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the
@ -132,9 +163,16 @@ type CreateBucketInput struct {
// BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer
// affect permissions. The bucket owner automatically owns and has full control
// over every object in the bucket. The bucket only accepts PUT requests that don't
// specify an ACL or bucket owner full control ACLs, such as the
// bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed
// in the XML format.
// specify an ACL or specify bucket owner full control ACLs (such as the predefined
// bucket-owner-full-control canned ACL or a custom ACL in XML format that grants
// the same permissions). By default, ObjectOwnership is set to BucketOwnerEnforced
// and ACLs are disabled. We recommend keeping ACLs disabled, except in uncommon
// use cases where you must control access for each object individually. For more
// information about S3 Object Ownership, see Controlling ownership of objects and
// disabling ACLs for your bucket (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
// in the Amazon S3 User Guide. This functionality is not supported for directory
// buckets. Directory buckets use the bucket owner enforced setting for S3 Object
// Ownership.
ObjectOwnership types.ObjectOwnership
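// Illustrative sketch of a general purpose bucket request built from the fields
// above; the bucket name is hypothetical, a client and ctx are assumed, and the
// CreateBucketConfiguration is omitted when creating the bucket in us-east-1.
//
//   out, err := client.CreateBucket(ctx, &s3.CreateBucketInput{
//       Bucket: aws.String("amzn-s3-demo-bucket"),
//       CreateBucketConfiguration: &types.CreateBucketConfiguration{
//           LocationConstraint: types.BucketLocationConstraintUsWest2,
//       },
//       ObjectOwnership:            types.ObjectOwnershipBucketOwnerEnforced,
//       ObjectLockEnabledForBucket: aws.Bool(true),
//   })
//   if err != nil {
//       log.Fatal(err)
//   }
//   log.Printf("bucket location: %s", aws.ToString(out.Location))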
noSmithyDocumentSerde
@ -142,6 +180,7 @@ type CreateBucketInput struct {
func (in *CreateBucketInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
p.DisableAccessPoints = ptr.Bool(true)
}
@ -211,6 +250,9 @@ func (c *Client) addOperationCreateBucketMiddlewares(stack *middleware.Stack, op
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpCreateBucketValidationMiddleware(stack); err != nil {
return err
}

View file

@ -21,97 +21,111 @@ import (
// ). You also include this upload ID in the final request to either complete or
// abort the multipart upload request. For more information about multipart
// uploads, see Multipart Upload Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html)
// . If you have configured a lifecycle rule to abort incomplete multipart uploads,
// the upload must complete within the number of days specified in the bucket
// lifecycle configuration. Otherwise, the incomplete multipart upload becomes
// eligible for an abort action and Amazon S3 aborts the multipart upload. For more
// information, see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle
// Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
// . For information about the permissions required to use the multipart upload
// API, see Multipart Upload and Permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
// . For request signing, multipart upload is just a series of regular requests.
// You initiate a multipart upload, send one or more requests to upload parts, and
// then complete the multipart upload process. You sign each request individually.
// There is nothing special about signing multipart upload requests. For more
// information about signing, see Authenticating Requests (Amazon Web Services
// Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html)
// . After you initiate a multipart upload and upload one or more parts, to stop
// being charged for storing the uploaded parts, you must either complete or abort
// the multipart upload. Amazon S3 frees up the space used to store the parts and
// stop charging you for storing them only after you either complete or abort a
// multipart upload. Server-side encryption is for data encryption at rest. Amazon
// S3 encrypts your data as it writes it to disks in its data centers and decrypts
// it when you access it. Amazon S3 automatically encrypts all new objects that are
// uploaded to an S3 bucket. When doing a multipart upload, if you don't specify
// encryption information in your request, the encryption setting of the uploaded
// parts is set to the default encryption configuration of the destination bucket.
// By default, all buckets have a base level of encryption configuration that uses
// server-side encryption with Amazon S3 managed keys (SSE-S3). If the destination
// bucket has a default encryption configuration that uses server-side encryption
// with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided
// encryption key (SSE-C), Amazon S3 uses the corresponding KMS key, or a
// customer-provided key to encrypt the uploaded parts. When you perform a
// CreateMultipartUpload operation, if you want to use a different type of
// encryption setting for the uploaded parts, you can request that Amazon S3
// encrypts the object with a KMS key, an Amazon S3 managed key, or a
// customer-provided key. If the encryption setting in your request is different
// from the default encryption configuration of the destination bucket, the
// encryption setting in your request takes precedence. If you choose to provide
// your own encryption key, the request headers you provide in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
// requests must match the headers you used in the request to initiate the upload
// by using CreateMultipartUpload . You can request that Amazon S3 save the
// uploaded parts encrypted with server-side encryption with an Amazon S3 managed
// key (SSE-S3), an Key Management Service (KMS) key (SSE-KMS), or a
// customer-provided encryption key (SSE-C). To perform a multipart upload with
// encryption by using an Amazon Web Services KMS key, the requester must have
// permission to the kms:Decrypt and kms:GenerateDataKey* actions on the key.
// These permissions are required because Amazon S3 must decrypt and read data from
// the encrypted file parts before it completes the multipart upload. For more
// information, see Multipart upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions)
// and Protecting data using server-side encryption with Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html)
// in the Amazon S3 User Guide. If your Identity and Access Management (IAM) user
// or role is in the same Amazon Web Services account as the KMS key, then you must
// have these permissions on the key policy. If your IAM user or role belongs to a
// different account than the key, then you must have the permissions on both the
// key policy and your IAM user or role. For more information, see Protecting Data
// Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html)
// . Access Permissions When copying an object, you can optionally specify the
// accounts or groups that should be granted specific permissions on the new
// object. There are two ways to grant the permissions using the request headers:
// - Specify a canned ACL with the x-amz-acl request header. For more
// information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL)
// .
// - Specify access permissions explicitly with the x-amz-grant-read ,
// x-amz-grant-read-acp , x-amz-grant-write-acp , and x-amz-grant-full-control
// headers. These parameters map to the set of permissions that Amazon S3 supports
// in an ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
// in the Amazon S3 User Guide. After you initiate a multipart upload and upload
// one or more parts, to stop being charged for storing the uploaded parts, you
// must either complete or abort the multipart upload. Amazon S3 frees up the space
// used to store the parts and stops charging you for storing them only after you
// either complete or abort a multipart upload. If you have configured a lifecycle
// rule to abort incomplete multipart uploads, the created multipart upload must be
// completed within the number of days specified in the bucket lifecycle
// configuration. Otherwise, the incomplete multipart upload becomes eligible for
// an abort action and Amazon S3 aborts the multipart upload. For more information,
// see Aborting Incomplete Multipart Uploads Using a Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
// .
// - Directory buckets - S3 Lifecycle is not supported by directory buckets.
// - Directory buckets - For directory buckets, you must make requests for this
// API operation to the Zonal endpoint. These endpoints support
// virtual-hosted-style requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name .
// Path-style requests are not supported. For more information, see Regional and
// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
// in the Amazon S3 User Guide.
//
// Request signing For request signing, multipart upload is just a series of
// regular requests. You initiate a multipart upload, send one or more requests to
// upload parts, and then complete the multipart upload process. You sign each
// request individually. There is nothing special about signing multipart upload
// requests. For more information about signing, see Authenticating Requests
// (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html)
// in the Amazon S3 User Guide. Permissions
// - General purpose bucket permissions - For information about the permissions
// required to use the multipart upload API, see Multipart upload and permissions (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html)
// in the Amazon S3 User Guide. To perform a multipart upload with encryption by
// using an Amazon Web Services KMS key, the requester must have permission to the
// kms:Decrypt and kms:GenerateDataKey* actions on the key. These permissions are
// required because Amazon S3 must decrypt and read data from the encrypted file
// parts before it completes the multipart upload. For more information, see
// Multipart upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions)
// and Protecting data using server-side encryption with Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html)
// in the Amazon S3 User Guide.
// - Directory bucket permissions - To grant access to this API operation on a
// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
// API operation for session-based authorization. Specifically, you grant the
// s3express:CreateSession permission to the directory bucket in a bucket policy
// or an IAM identity-based policy. Then, you make the CreateSession API call on
// the bucket to obtain a session token. With the session token in your request
// header, you can make API requests to this operation. After the session token
// expires, you make another CreateSession API call to generate a new session
// token for use. Amazon Web Services CLI or SDKs create session and refresh the
// session token automatically to avoid service interruptions when a session
// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
// .
//
// You can use either a canned ACL or specify access permissions explicitly. You
// cannot do both. Server-Side-Encryption-Specific Request Headers Amazon S3
// encrypts data by using server-side encryption with an Amazon S3 managed key
// (SSE-S3) by default. Server-side encryption is for data encryption at rest.
// Amazon S3 encrypts your data as it writes it to disks in its data centers and
// decrypts it when you access it. You can request that Amazon S3 encrypts data at
// rest by using server-side encryption with other key options. The option you use
// depends on whether you want to use KMS keys (SSE-KMS) or provide your own
// encryption keys (SSE-C).
// Encryption
// - General purpose buckets - Server-side encryption is for data encryption at
// rest. Amazon S3 encrypts your data as it writes it to disks in its data centers
// and decrypts it when you access it. Amazon S3 automatically encrypts all new
// objects that are uploaded to an S3 bucket. When doing a multipart upload, if you
// don't specify encryption information in your request, the encryption setting of
// the uploaded parts is set to the default encryption configuration of the
// destination bucket. By default, all buckets have a base level of encryption
// configuration that uses server-side encryption with Amazon S3 managed keys
// (SSE-S3). If the destination bucket has a default encryption configuration that
// uses server-side encryption with a Key Management Service (KMS) key (SSE-KMS),
// or a customer-provided encryption key (SSE-C), Amazon S3 uses the corresponding
// KMS key, or a customer-provided key to encrypt the uploaded parts. When you
// perform a CreateMultipartUpload operation, if you want to use a different type
// of encryption setting for the uploaded parts, you can request that Amazon S3
// encrypts the object with a different encryption key (such as an Amazon S3
// managed key, a KMS key, or a customer-provided key). When the encryption setting
// in your request is different from the default encryption configuration of the
// destination bucket, the encryption setting in your request takes precedence. If
// you choose to provide your own encryption key, the request headers you provide
// in UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
// and UploadPartCopy (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html)
// requests must match the headers you used in the CreateMultipartUpload request.
// - Use KMS keys (SSE-KMS) that include the Amazon Web Services managed key (
// aws/s3 ) and KMS customer managed keys stored in Key Management Service (KMS) -
// If you want Amazon Web Services to manage the keys used to encrypt data,
// specify the following headers in the request.
// - x-amz-server-side-encryption
// - x-amz-server-side-encryption-aws-kms-key-id
// - x-amz-server-side-encryption-context
// - If you specify x-amz-server-side-encryption:aws:kms , but don't provide
// x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web
// Services managed key ( aws/s3 key) in KMS to protect the data.
// - To perform a multipart upload with encryption by using an Amazon Web
// Services KMS key, the requester must have permission to the kms:Decrypt and
// kms:GenerateDataKey* actions on the key. These permissions are required
// because Amazon S3 must decrypt and read data from the encrypted file parts
// before it completes the multipart upload. For more information, see Multipart
// upload API and permissions (https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions)
// and Protecting data using server-side encryption with Amazon Web Services KMS (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html)
// in the Amazon S3 User Guide.
// - If your Identity and Access Management (IAM) user or role is in the same
// Amazon Web Services account as the KMS key, then you must have these permissions
// on the key policy. If your IAM user or role is in a different account from the
// key, then you must have the permissions on both the key policy and your IAM user
// or role.
// - All GET and PUT requests for an object protected by KMS fail if you don't
// make them by using Secure Sockets Layer (SSL), Transport Layer Security (TLS),
// or Signature Version 4. For information about configuring any of the officially
// supported Amazon Web Services SDKs and Amazon Web Services CLI, see
// Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
// in the Amazon S3 User Guide. For more information about server-side
// encryption with KMS keys (SSE-KMS), see Protecting Data Using Server-Side
// Encryption with KMS keys (https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html)
// in the Amazon S3 User Guide.
// - Use customer-provided encryption keys (SSE-C) If you want to manage your
// own encryption keys, provide all the following headers in the request.
// - x-amz-server-side-encryption-customer-algorithm
@ -120,55 +134,13 @@ import (
// server-side encryption with customer-provided encryption keys (SSE-C), see
// Protecting data using server-side encryption with customer-provided encryption
// keys (SSE-C) (https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html)
// .
// in the Amazon S3 User Guide.
// - Directory buckets - For directory buckets, only server-side encryption with
// Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported.
//
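// As an illustrative sketch only (not part of the generated documentation, and
// written from a caller's point of view), the SSE-KMS settings described above
// map onto CreateMultipartUploadInput fields roughly as follows. The client,
// context, bucket name, and KMS key ID are placeholders, and aws.String and
// aws.Bool come from the github.com/aws/aws-sdk-go-v2/aws package:
//
//	out, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
//		Bucket:               aws.String("DOC-EXAMPLE-BUCKET"),
//		Key:                  aws.String("example-object"),
//		ServerSideEncryption: types.ServerSideEncryptionAwsKms,
//		SSEKMSKeyId:          aws.String("example-kms-key-id"),
//		BucketKeyEnabled:     aws.Bool(true),
//	})
//	// out.UploadId identifies the multipart upload for later UploadPart calls.
//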
// Access-Control-List (ACL)-Specific Request Headers You also can use the
// following access control-related headers with this operation. By default, all
// objects are private. Only the owner has full access control. When adding a new
// object, you can grant permissions to individual Amazon Web Services accounts or
// to predefined groups defined by Amazon S3. These permissions are then added to
// the access control list (ACL) on the object. For more information, see Using
// ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html) .
// With this operation, you can grant access permissions using one of the following
// two methods:
// - Specify a canned ACL ( x-amz-acl ) — Amazon S3 supports a set of predefined
// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and
// permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL)
// .
// - Specify access permissions explicitly — To explicitly grant access
// permissions to specific Amazon Web Services accounts or groups, use the
// following headers. Each header maps to specific permissions that Amazon S3
// supports in an ACL. For more information, see Access Control List (ACL)
// Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) .
// In the header, you specify a list of grantees who get the specific permission.
// To grant permissions explicitly, use:
// - x-amz-grant-read
// - x-amz-grant-write
// - x-amz-grant-read-acp
// - x-amz-grant-write-acp
// - x-amz-grant-full-control You specify each grantee as a type=value pair,
// where the type is one of the following:
// - id if the value specified is the canonical user ID of an Amazon Web
// Services account
// - uri if you are granting permissions to a predefined group
// - emailAddress if the value specified is the email address of an Amazon Web
// Services account Using email addresses to specify a grantee is only supported in
// the following Amazon Web Services Regions:
// - US East (N. Virginia)
// - US West (N. California)
// - US West (Oregon)
// - Asia Pacific (Singapore)
// - Asia Pacific (Sydney)
// - Asia Pacific (Tokyo)
// - Europe (Ireland)
// - South America (São Paulo) For a list of all the Amazon S3 supported Regions
// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
// in the Amazon Web Services General Reference. For example, the following
// x-amz-grant-read header grants the Amazon Web Services accounts identified by
// account IDs permissions to read object data and its metadata:
// x-amz-grant-read: id="11112222333", id="444455556666"
//
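// As an illustrative sketch only (not part of the generated documentation, and
// written from a caller's point of view with the placeholder account IDs shown
// above), explicit grants map onto the corresponding input fields; aws.String is
// from the github.com/aws/aws-sdk-go-v2/aws package:
//
//	out, err := client.CreateMultipartUpload(ctx, &s3.CreateMultipartUploadInput{
//		Bucket:    aws.String("DOC-EXAMPLE-BUCKET"),
//		Key:       aws.String("example-object"),
//		GrantRead: aws.String(`id="11112222333", id="444455556666"`),
//	})
//
// Alternatively, a canned ACL can be supplied through the ACL field (for
// example, types.ObjectCannedACLPublicRead); canned ACLs and explicit grants
// cannot be combined in the same request.
//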
// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are
// related to CreateMultipartUpload :
// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
// - AbortMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html)
@ -191,16 +163,26 @@ func (c *Client) CreateMultipartUpload(ctx context.Context, params *CreateMultip
type CreateMultipartUploadInput struct {
// The name of the bucket to which to initiate the upload. When using this action
// with an access point, you must direct requests to the access point hostname. The
// access point hostname takes the form
// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
// action with an access point through the Amazon Web Services SDKs, you provide
// the access point ARN in place of the bucket name. For more information about
// access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide. When you use this action with Amazon S3 on
// Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on
// Outposts hostname takes the form
// The name of the bucket where the multipart upload is initiated and where the
// object is uploaded. Directory buckets - When you use this operation with a
// directory bucket, you must use virtual-hosted-style requests in the format
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket
// naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide. Access points - When you use this action with an
// access point, you must provide the alias of the access point in place of the
// bucket name or specify the access point ARN. When using the access point ARN,
// you must direct requests to the access point hostname. The access point hostname
// takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide. Access points and Object Lambda access points are
// not supported by directory buckets. S3 on Outposts - When you use this action
// with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts
// hostname. The S3 on Outposts hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
@ -215,22 +197,33 @@ type CreateMultipartUploadInput struct {
// This member is required.
Key *string
// The canned ACL to apply to the object. This action is not supported by Amazon
// S3 on Outposts.
// The canned ACL to apply to the object. Amazon S3 supports a set of predefined
// ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and
// permissions. For more information, see Canned ACL (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL)
// in the Amazon S3 User Guide. By default, all objects are private. Only the owner
// has full access control. When uploading an object, you can grant access
// permissions to individual Amazon Web Services accounts or to predefined groups
// defined by Amazon S3. These permissions are then added to the access control
// list (ACL) on the new object. For more information, see Using ACLs (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html)
// . One way to grant the permissions using the request headers is to specify a
// canned ACL with the x-amz-acl request header.
// - This functionality is not supported for directory buckets.
// - This functionality is not supported for Amazon S3 on Outposts.
ACL types.ObjectCannedACL
// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
// with server-side encryption using Key Management Service (KMS) keys (SSE-KMS).
// Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object
// encryption with SSE-KMS. Specifying this header with an object action doesn't
// affect bucket-level settings for S3 Bucket Key.
BucketKeyEnabled bool
// affect bucket-level settings for S3 Bucket Key. This functionality is not
// supported for directory buckets.
BucketKeyEnabled *bool
// Specifies caching behavior along the request/reply chain.
CacheControl *string
// Indicates the algorithm you want Amazon S3 to use to create the checksum for
// the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// Indicates the algorithm that you want Amazon S3 to use to create the checksum
// for the object. For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumAlgorithm types.ChecksumAlgorithm
@ -239,108 +232,226 @@ type CreateMultipartUploadInput struct {
// Specifies what content encodings have been applied to the object and thus what
// decoding mechanisms must be applied to obtain the media-type referenced by the
// Content-Type header field.
// Content-Type header field. For directory buckets, only the aws-chunked value is
// supported in this header field.
ContentEncoding *string
// The language the content is in.
// The language that the content is in.
ContentLanguage *string
// A standard MIME type describing the format of the object data.
ContentType *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
// The date and time at which the object is no longer cacheable.
Expires *time.Time
// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. This
// action is not supported by Amazon S3 on Outposts.
// Specify access permissions explicitly to give the grantee READ, READ_ACP, and
// WRITE_ACP permissions on the object. By default, all objects are private. Only
// the owner has full access control. When uploading an object, you can use this
// header to explicitly grant access permissions to specific Amazon Web Services
// accounts or groups. This header maps to specific permissions that Amazon S3
// supports in an ACL. For more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
// in the Amazon S3 User Guide. You specify each grantee as a type=value pair,
// where the type is one of the following:
// - id if the value specified is the canonical user ID of an Amazon Web
// Services account
// - uri if you are granting permissions to a predefined group
// - emailAddress if the value specified is the email address of an Amazon Web
// Services account Using email addresses to specify a grantee is only supported in
// the following Amazon Web Services Regions:
// - US East (N. Virginia)
// - US West (N. California)
// - US West (Oregon)
// - Asia Pacific (Singapore)
// - Asia Pacific (Sydney)
// - Asia Pacific (Tokyo)
// - Europe (Ireland)
// - South America (São Paulo) For a list of all the Amazon S3 supported Regions
// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
// in the Amazon Web Services General Reference.
// For example, the following x-amz-grant-read header grants the Amazon Web
// Services accounts identified by account IDs permissions to read object data and
// its metadata: x-amz-grant-read: id="11112222333", id="444455556666"
// - This functionality is not supported for directory buckets.
// - This functionality is not supported for Amazon S3 on Outposts.
GrantFullControl *string
// Allows grantee to read the object data and its metadata. This action is not
// supported by Amazon S3 on Outposts.
// Specify access permissions explicitly to allow grantee to read the object data
// and its metadata. By default, all objects are private. Only the owner has full
// access control. When uploading an object, you can use this header to explicitly
// grant access permissions to specific Amazon Web Services accounts or groups.
// This header maps to specific permissions that Amazon S3 supports in an ACL. For
// more information, see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
// in the Amazon S3 User Guide. You specify each grantee as a type=value pair,
// where the type is one of the following:
// - id if the value specified is the canonical user ID of an Amazon Web
// Services account
// - uri if you are granting permissions to a predefined group
// - emailAddress if the value specified is the email address of an Amazon Web
// Services account Using email addresses to specify a grantee is only supported in
// the following Amazon Web Services Regions:
// - US East (N. Virginia)
// - US West (N. California)
// - US West (Oregon)
// - Asia Pacific (Singapore)
// - Asia Pacific (Sydney)
// - Asia Pacific (Tokyo)
// - Europe (Ireland)
// - South America (São Paulo) For a list of all the Amazon S3 supported Regions
// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
// in the Amazon Web Services General Reference.
// For example, the following x-amz-grant-read header grants the Amazon Web
// Services accounts identified by account IDs permissions to read object data and
// its metadata: x-amz-grant-read: id="11112222333", id="444455556666"
// - This functionality is not supported for directory buckets.
// - This functionality is not supported for Amazon S3 on Outposts.
GrantRead *string
// Allows grantee to read the object ACL. This action is not supported by Amazon
// S3 on Outposts.
// Specify access permissions explicitly to allow the grantee to read the object
// ACL.
// By default, all objects are private. Only the owner has full access control.
// When uploading an object, you can use this header to explicitly grant access
// permissions to specific Amazon Web Services accounts or groups. This header maps
// to specific permissions that Amazon S3 supports in an ACL. For more information,
// see Access Control List (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
// in the Amazon S3 User Guide. You specify each grantee as a type=value pair,
// where the type is one of the following:
// - id if the value specified is the canonical user ID of an Amazon Web
// Services account
// - uri if you are granting permissions to a predefined group
// - emailAddress if the value specified is the email address of an Amazon Web
// Services account Using email addresses to specify a grantee is only supported in
// the following Amazon Web Services Regions:
// - US East (N. Virginia)
// - US West (N. California)
// - US West (Oregon)
// - Asia Pacific (Singapore)
// - Asia Pacific (Sydney)
// - Asia Pacific (Tokyo)
// - Europe (Ireland)
// - South America (São Paulo) For a list of all the Amazon S3 supported Regions
// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
// in the Amazon Web Services General Reference.
// For example, the following x-amz-grant-read header grants the Amazon Web
// Services accounts identified by account IDs permissions to read object data and
// its metadata: x-amz-grant-read: id="11112222333", id="444455556666"
// - This functionality is not supported for directory buckets.
// - This functionality is not supported for Amazon S3 on Outposts.
GrantReadACP *string
// Allows grantee to write the ACL for the applicable object. This action is not
// supported by Amazon S3 on Outposts.
// Specify access permissions explicitly to allow the grantee to write the ACL
// for the applicable object. By default, all objects are private.
// Only the owner has full access control. When uploading an object, you can use
// this header to explicitly grant access permissions to specific Amazon Web
// Services accounts or groups. This header maps to specific permissions that
// Amazon S3 supports in an ACL. For more information, see Access Control List
// (ACL) Overview (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html)
// in the Amazon S3 User Guide. You specify each grantee as a type=value pair,
// where the type is one of the following:
// - id if the value specified is the canonical user ID of an Amazon Web
// Services account
// - uri if you are granting permissions to a predefined group
// - emailAddress if the value specified is the email address of an Amazon Web
// Services account Using email addresses to specify a grantee is only supported in
// the following Amazon Web Services Regions:
// - US East (N. Virginia)
// - US West (N. California)
// - US West (Oregon)
// - Asia Pacific (Singapore)
// - Asia Pacific (Sydney)
// - Asia Pacific (Tokyo)
// - Europe (Ireland)
// - South America (São Paulo) For a list of all the Amazon S3 supported Regions
// and endpoints, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region)
// in the Amazon Web Services General Reference.
// For example, the following x-amz-grant-read header grants the Amazon Web
// Services accounts identified by account IDs permissions to read object data and
// its metadata: x-amz-grant-read: id="11112222333", id="444455556666"
// - This functionality is not supported for directory buckets.
// - This functionality is not supported for Amazon S3 on Outposts.
GrantWriteACP *string
// A map of metadata to store with the object in S3.
Metadata map[string]string
// Specifies whether you want to apply a legal hold to the uploaded object.
// Specifies whether you want to apply a legal hold to the uploaded object. This
// functionality is not supported for directory buckets.
ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
// Specifies the Object Lock mode that you want to apply to the uploaded object.
// This functionality is not supported for directory buckets.
ObjectLockMode types.ObjectLockMode
// Specifies the date and time when you want the Object Lock to expire.
// Specifies the date and time when you want the Object Lock to expire. This
// functionality is not supported for directory buckets.
ObjectLockRetainUntilDate *time.Time
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
// in the Amazon S3 User Guide. This functionality is not supported for directory
// buckets.
RequestPayer types.RequestPayer
// Specifies the algorithm to use to when encrypting the object (for example,
// AES256).
// Specifies the algorithm to use when encrypting the object (for example,
// AES256). This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string
// Specifies the customer-provided encryption key for Amazon S3 to use in
// encrypting data. This value is used to store the object and then it is
// discarded; Amazon S3 does not store the encryption key. The key must be
// appropriate for use with the algorithm specified in the
// x-amz-server-side-encryption-customer-algorithm header.
// x-amz-server-side-encryption-customer-algorithm header. This functionality is
// not supported for directory buckets.
SSECustomerKey *string
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
// encryption key was transmitted without error.
// Specifies the 128-bit MD5 digest of the customer-provided encryption key
// according to RFC 1321. Amazon S3 uses this header for a message integrity check
// to ensure that the encryption key was transmitted without error. This
// functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string
// Specifies the Amazon Web Services KMS Encryption Context to use for object
// encryption. The value of this header is a base64-encoded UTF-8 string holding
// JSON with the encryption context key-value pairs.
// JSON with the encryption context key-value pairs. This functionality is not
// supported for directory buckets.
SSEKMSEncryptionContext *string
// Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption
// customer managed key to use for object encryption. All GET and PUT requests for
// an object protected by KMS will fail if they're not made via SSL or using SigV4.
// For information about configuring any of the officially supported Amazon Web
// Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version
// in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version)
// in the Amazon S3 User Guide.
// customer managed key to use for object encryption. This functionality is not
// supported for directory buckets.
SSEKMSKeyId *string
// The server-side encryption algorithm used when storing this object in Amazon S3
// (for example, AES256 , aws:kms ).
// The server-side encryption algorithm used when you store this object in Amazon
// S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side
// encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported.
ServerSideEncryption types.ServerSideEncryption
// By default, Amazon S3 uses the STANDARD Storage Class to store newly created
// objects. The STANDARD storage class provides high durability and high
// availability. Depending on performance needs, you can specify a different
// Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
// more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
// Storage Class. For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
// in the Amazon S3 User Guide.
// - For directory buckets, only the S3 Express One Zone storage class is
// supported to store newly created objects.
// - Amazon S3 on Outposts only uses the OUTPOSTS Storage Class.
StorageClass types.StorageClass
// The tag-set for the object. The tag-set must be encoded as URL Query parameters.
// The tag-set for the object. The tag-set must be encoded as URL Query
// parameters. This functionality is not supported for directory buckets.
Tagging *string
// If the bucket is configured as a website, redirects requests for this object to
// another object in the same bucket or to an external URL. Amazon S3 stores the
// value of this header in the object metadata.
// value of this header in the object metadata. This functionality is not supported
// for directory buckets.
WebsiteRedirectLocation *string
noSmithyDocumentSerde
@ -348,6 +459,7 @@ type CreateMultipartUploadInput struct {
func (in *CreateMultipartUploadInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.Key = in.Key
}
@ -359,36 +471,26 @@ type CreateMultipartUploadOutput struct {
// indicates when the initiated multipart upload becomes eligible for an abort
// operation. For more information, see Aborting Incomplete Multipart Uploads
// Using a Bucket Lifecycle Configuration (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
// . The response also includes the x-amz-abort-rule-id header that provides the
// ID of the lifecycle configuration rule that defines this action.
// in the Amazon S3 User Guide. The response also includes the x-amz-abort-rule-id
// header that provides the ID of the lifecycle configuration rule that defines the
// abort action. This functionality is not supported for directory buckets.
AbortDate *time.Time
// This header is returned along with the x-amz-abort-date header. It identifies
// the applicable lifecycle configuration rule that defines the action to abort
// incomplete multipart uploads.
// incomplete multipart uploads. This functionality is not supported for directory
// buckets.
AbortRuleId *string
// The name of the bucket to which the multipart upload was initiated. Does not
// return the access point ARN or access point alias if used. When using this
// action with an access point, you must direct requests to the access point
// hostname. The access point hostname takes the form
// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
// action with an access point through the Amazon Web Services SDKs, you provide
// the access point ARN in place of the bucket name. For more information about
// access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide. When you use this action with Amazon S3 on
// Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on
// Outposts hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
// information about S3 on Outposts ARNs, see What is S3 on Outposts? (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html)
// in the Amazon S3 User Guide.
// return the access point ARN or access point alias if used. Access points are not
// supported by directory buckets.
Bucket *string
// Indicates whether the multipart upload uses an S3 Bucket Key for server-side
// encryption with Key Management Service (KMS) keys (SSE-KMS).
BucketKeyEnabled bool
// encryption with Key Management Service (KMS) keys (SSE-KMS). This functionality
// is not supported for directory buckets.
BucketKeyEnabled *bool
// The algorithm that was used to create a checksum of the object.
ChecksumAlgorithm types.ChecksumAlgorithm
@ -397,30 +499,35 @@ type CreateMultipartUploadOutput struct {
Key *string
// If present, indicates that the requester was successfully charged for the
// request.
// request. This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// If server-side encryption with a customer-provided encryption key was
// requested, the response will include this header confirming the encryption
// algorithm used.
// requested, the response will include this header to confirm the encryption
// algorithm that's used. This functionality is not supported for directory
// buckets.
SSECustomerAlgorithm *string
// If server-side encryption with a customer-provided encryption key was
// requested, the response will include this header to provide round-trip message
// integrity verification of the customer-provided encryption key.
// requested, the response will include this header to provide the round-trip
// message integrity verification of the customer-provided encryption key. This
// functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string
// If present, specifies the Amazon Web Services KMS Encryption Context to use for
// If present, indicates the Amazon Web Services KMS Encryption Context to use for
// object encryption. The value of this header is a base64-encoded UTF-8 string
// holding JSON with the encryption context key-value pairs.
// holding JSON with the encryption context key-value pairs. This functionality is
// not supported for directory buckets.
SSEKMSEncryptionContext *string
// If present, specifies the ID of the Key Management Service (KMS) symmetric
// encryption customer managed key that was used for the object.
// If present, indicates the ID of the Key Management Service (KMS) symmetric
// encryption customer managed key that was used for the object. This functionality
// is not supported for directory buckets.
SSEKMSKeyId *string
// The server-side encryption algorithm used when storing this object in Amazon S3
// (for example, AES256 , aws:kms ).
// The server-side encryption algorithm used when you store this object in Amazon
// S3 (for example, AES256 , aws:kms ). For directory buckets, only server-side
// encryption with Amazon S3 managed keys (SSE-S3) ( AES256 ) is supported.
ServerSideEncryption types.ServerSideEncryption
// ID for the initiated multipart upload.
@ -487,6 +594,9 @@ func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware.
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpCreateMultipartUploadValidationMiddleware(stack); err != nil {
return err
}
@ -520,6 +630,9 @@ func (c *Client) addOperationCreateMultipartUploadMiddlewares(stack *middleware.
if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
return err
}
if err = addSetCreateMPUChecksumAlgorithm(stack); err != nil {
return err
}
return nil
}


@ -0,0 +1,260 @@
// Code generated by smithy-go-codegen DO NOT EDIT.
package s3
import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Creates a session that establishes temporary security credentials to support
// fast authentication and authorization for the Zonal endpoint APIs on directory
// buckets. For more information about Zonal endpoint APIs that include the
// Availability Zone in the request endpoint, see S3 Express One Zone APIs (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-APIs.html)
// in the Amazon S3 User Guide. To make Zonal endpoint API requests on a directory
// bucket, use the CreateSession API operation. Specifically, you grant
// s3express:CreateSession permission to a bucket in a bucket policy or an IAM
// identity-based policy. Then, you use IAM credentials to make the CreateSession
// API request on the bucket, which returns temporary security credentials that
// include the access key ID, secret access key, session token, and expiration.
// These credentials have associated permissions to access the Zonal endpoint APIs.
// After the session is created, you don't need to use other policies to grant
// permissions to each Zonal endpoint API individually. Instead, in your Zonal
// endpoint API requests, you sign your requests by applying the temporary security
// credentials of the session to the request headers and following the SigV4
// protocol for authentication. You also apply the session token to the
// x-amz-s3session-token request header for authorization. Temporary security
// credentials are scoped to the bucket and expire after 5 minutes. After the
// expiration time, any calls that you make with those credentials will fail. You
// must use IAM credentials again to make a CreateSession API request that
// generates a new set of temporary credentials for use. Temporary credentials
// cannot be extended or refreshed beyond the original specified interval. If you
// use Amazon Web Services SDKs, SDKs handle the session token refreshes
// automatically to avoid service interruptions when a session expires. We
// recommend that you use the Amazon Web Services SDKs to initiate and manage
// requests to the CreateSession API. For more information, see Performance
// guidelines and design patterns (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-optimizing-performance-guidelines-design-patterns.html#s3-express-optimizing-performance-session-authentication)
// in the Amazon S3 User Guide.
// - You must make requests for this API operation to the Zonal endpoint. These
// endpoints support virtual-hosted-style requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests
// are not supported. For more information, see Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
// in the Amazon S3 User Guide.
// - CopyObject API operation - Unlike other Zonal endpoint APIs, the CopyObject
// API operation doesn't use the temporary security credentials returned from the
// CreateSession API operation for authentication and authorization. For
// information about authentication and authorization of the CopyObject API
// operation on directory buckets, see CopyObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html)
// .
// - HeadBucket API operation - Unlike other Zonal endpoint APIs, the HeadBucket
// API operation doesn't use the temporary security credentials returned from the
// CreateSession API operation for authentication and authorization. For
// information about authentication and authorization of the HeadBucket API
// operation on directory buckets, see HeadBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadBucket.html)
// .
//
// Permissions To obtain temporary security credentials, you must create a bucket
// policy or an IAM identity-based policy that grants s3express:CreateSession
// permission to the bucket. In a policy, you can use the s3express:SessionMode
// condition key to control who can create a ReadWrite or ReadOnly session. For
// more information about ReadWrite or ReadOnly sessions, see
// x-amz-create-session-mode (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html#API_CreateSession_RequestParameters)
// . For example policies, see Example bucket policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html)
// and Amazon Web Services Identity and Access Management (IAM) identity-based
// policies for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html)
// in the Amazon S3 User Guide. To grant cross-account access to Zonal endpoint
// APIs, the bucket policy should also grant both accounts the
// s3express:CreateSession permission. HTTP Host header syntax Directory buckets -
// The HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com .
func (c *Client) CreateSession(ctx context.Context, params *CreateSessionInput, optFns ...func(*Options)) (*CreateSessionOutput, error) {
if params == nil {
params = &CreateSessionInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CreateSession", params, optFns, c.addOperationCreateSessionMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CreateSessionOutput)
out.ResultMetadata = metadata
return out, nil
}
type CreateSessionInput struct {
// The name of the bucket that you create a session for.
//
// This member is required.
Bucket *string
// Specifies the mode of the session that will be created, either ReadWrite or
// ReadOnly . By default, a ReadWrite session is created. A ReadWrite session is
// capable of executing all the Zonal endpoint APIs on a directory bucket. A
// ReadOnly session is constrained to execute the following Zonal endpoint APIs:
// GetObject , HeadObject , ListObjectsV2 , GetObjectAttributes , ListParts , and
// ListMultipartUploads .
SessionMode types.SessionMode
noSmithyDocumentSerde
}
func (in *CreateSessionInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.DisableS3ExpressSessionAuth = ptr.Bool(true)
}
type CreateSessionOutput struct {
// The established temporary security credentials for the created session.
//
// This member is required.
Credentials *types.SessionCredentials
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
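// exampleCreateSessionSketch is an illustrative sketch only; it is not part of
// the generated API surface. It shows one plausible way for a caller to obtain
// temporary credentials for a directory bucket and read them back. The bucket
// name below is a placeholder, and error handling is deliberately minimal.
func exampleCreateSessionSketch(ctx context.Context, client *Client) error {
	out, err := client.CreateSession(ctx, &CreateSessionInput{
		// Hypothetical directory bucket name following the
		// bucket_base_name--az_id--x-s3 naming convention.
		Bucket: ptr.String("DOC-EXAMPLE-BUCKET--usw2-az2--x-s3"),
		// Restrict the session to read-only Zonal endpoint APIs.
		SessionMode: types.SessionModeReadOnly,
	})
	if err != nil {
		return err
	}
	// The temporary credentials are scoped to the bucket and expire after a
	// short interval; SDK clients normally consume and refresh them automatically.
	creds := out.Credentials
	_, _, _ = creds.AccessKeyId, creds.SecretAccessKey, creds.SessionToken
	return nil
}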
func (c *Client) addOperationCreateSessionMiddlewares(stack *middleware.Stack, options Options) (err error) {
if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
return err
}
err = stack.Serialize.Add(&awsRestxml_serializeOpCreateSession{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestxml_deserializeOpCreateSession{}, middleware.After)
if err != nil {
return err
}
if err := addProtocolFinalizerMiddlewares(stack, options, "CreateSession"); err != nil {
return fmt.Errorf("add protocol finalizers: %v", err)
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpCreateSessionValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateSession(options.Region), middleware.Before); err != nil {
return err
}
if err = addMetadataRetrieverMiddleware(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
return err
}
if err = addCreateSessionUpdateEndpoint(stack, options); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
return err
}
if err = disableAcceptEncodingGzip(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
return err
}
return nil
}
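// exampleWithCustomCreateSessionMiddleware is an illustrative sketch only, not
// generated code. It shows how a caller could extend the stack assembled above
// through Options.APIOptions, here with a no-op finalize middleware; the
// middleware name and the customization itself are hypothetical.
func exampleWithCustomCreateSessionMiddleware(o *Options) {
	o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
		return stack.Finalize.Add(
			middleware.FinalizeMiddlewareFunc("exampleNoop",
				func(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
					middleware.FinalizeOutput, middleware.Metadata, error) {
					// Custom per-request logic (logging, header injection, and
					// so on) would go here; this sketch simply forwards the call.
					return next.HandleFinalize(ctx, in)
				}),
			middleware.Before,
		)
	})
}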
func (v *CreateSessionInput) bucket() (string, bool) {
if v.Bucket == nil {
return "", false
}
return *v.Bucket, true
}
func newServiceMetadataMiddleware_opCreateSession(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
OperationName: "CreateSession",
}
}
// getCreateSessionBucketMember returns a pointer to string denoting a provided
// bucket member value and a boolean indicating if the input has a modeled bucket
// name.
func getCreateSessionBucketMember(input interface{}) (*string, bool) {
in := input.(*CreateSessionInput)
if in.Bucket == nil {
return nil, false
}
return in.Bucket, true
}
func addCreateSessionUpdateEndpoint(stack *middleware.Stack, options Options) error {
return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
Accessor: s3cust.UpdateEndpointParameterAccessor{
GetBucketFromInput: getCreateSessionBucketMember,
},
UsePathStyle: options.UsePathStyle,
UseAccelerate: options.UseAccelerate,
SupportsAccelerate: true,
TargetS3ObjectLambda: false,
EndpointResolver: options.EndpointResolver,
EndpointResolverOptions: options.EndpointOptions,
UseARNRegion: options.UseARNRegion,
DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
})
}
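// As an illustrative sketch only (not generated code), the endpoint-related
// options consumed above are normally set by the caller when constructing the
// client; cfg is an assumed aws.Config loaded elsewhere:
//
//	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
//		o.UsePathStyle = true // use path-style rather than virtual-hosted-style addressing
//		o.UseARNRegion = true // allow access point ARNs from another region
//	})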


@ -9,12 +9,37 @@ import (
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Deletes the S3 bucket. All objects (including all object versions and delete
// markers) in the bucket must be deleted before the bucket itself can be deleted.
// - Directory buckets - If multipart uploads in a directory bucket are in
// progress, you can't delete the bucket until all the in-progress multipart
// uploads are aborted or completed.
// - Directory buckets - For directory buckets, you must make requests for this
// API operation to the Regional endpoint. These endpoints support path-style
// requests in the format
// https://s3express-control.region_code.amazonaws.com/bucket-name .
// Virtual-hosted-style requests aren't supported. For more information, see
// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
// in the Amazon S3 User Guide.
//
// Permissions
// - General purpose bucket permissions - You must have the s3:DeleteBucket
// permission on the specified bucket in a policy.
// - Directory bucket permissions - You must have the s3express:DeleteBucket
// permission in an IAM identity-based policy instead of a bucket policy.
// Cross-account access to this API operation isn't supported. This operation can
// only be performed by the Amazon Web Services account that owns the resource. For
// more information about directory bucket policies and permissions, see Amazon
// Web Services Identity and Access Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html)
// in the Amazon S3 User Guide.
//
// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
// s3express-control.region.amazonaws.com . The following operations are related to
// DeleteBucket :
// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, optFns ...func(*Options)) (*DeleteBucketOutput, error) {
@ -34,14 +59,24 @@ func (c *Client) DeleteBucket(ctx context.Context, params *DeleteBucketInput, op
type DeleteBucketInput struct {
// Specifies the bucket being deleted.
// Specifies the bucket being deleted. Directory buckets - When you use this
// operation with a directory bucket, you must use path-style requests in the
// format https://s3express-control.region_code.amazonaws.com/bucket-name .
// Virtual-hosted-style requests aren't supported. Directory bucket names must be
// unique in the chosen Availability Zone. Bucket names must also follow the format
// bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3
// ). For information about bucket naming restrictions, see Directory bucket
// naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide.
//
// This member is required.
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied). For directory buckets, this header
// is not supported in this API operation. If you specify this header, the request
// fails with the HTTP status code 501 Not Implemented .
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -49,7 +84,7 @@ type DeleteBucketInput struct {
func (in *DeleteBucketInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
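// exampleDeleteBucketSketch is an illustrative sketch only; it is not part of
// the generated API surface. It shows one plausible caller-side use of
// DeleteBucket with the ExpectedBucketOwner guard described above. The bucket
// name and account ID are placeholders.
func exampleDeleteBucketSketch(ctx context.Context, client *Client) error {
	_, err := client.DeleteBucket(ctx, &DeleteBucketInput{
		Bucket: ptr.String("DOC-EXAMPLE-BUCKET"),
		// Fail with 403 Forbidden if the bucket is owned by another account.
		ExpectedBucketOwner: ptr.String("111122223333"),
	})
	return err
}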
type DeleteBucketOutput struct {
@ -114,6 +149,9 @@ func (c *Client) addOperationDeleteBucketMiddlewares(stack *middleware.Stack, op
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpDeleteBucketValidationMiddleware(stack); err != nil {
return err
}


@ -9,13 +9,15 @@ import (
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Deletes an analytics configuration for the bucket (specified by the analytics
// configuration ID). To use this operation, you must have permissions to perform
// the s3:PutAnalyticsConfiguration action. The bucket owner has this permission
// by default. The bucket owner can grant this permission to others. For more
// This operation is not supported by directory buckets. Deletes an analytics
// configuration for the bucket (specified by the analytics configuration ID). To
// use this operation, you must have permissions to perform the
// s3:PutAnalyticsConfiguration action. The bucket owner has this permission by
// default. The bucket owner can grant this permission to others. For more
// information about permissions, see Permissions Related to Bucket Subresource
// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
@ -52,9 +54,9 @@ type DeleteBucketAnalyticsConfigurationInput struct {
// This member is required.
Id *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -62,7 +64,7 @@ type DeleteBucketAnalyticsConfigurationInput struct {
func (in *DeleteBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type DeleteBucketAnalyticsConfigurationOutput struct {
@ -127,6 +129,9 @@ func (c *Client) addOperationDeleteBucketAnalyticsConfigurationMiddlewares(stack
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpDeleteBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil {
return err
}


@ -9,13 +9,15 @@ import (
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Deletes the cors configuration information set for the bucket. To use this
// operation, you must have permission to perform the s3:PutBucketCORS action. The
// bucket owner has this permission by default and can grant this permission to
// others. For information about cors , see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html)
// This operation is not supported by directory buckets. Deletes the cors
// configuration information set for the bucket. To use this operation, you must
// have permission to perform the s3:PutBucketCORS action. The bucket owner has
// this permission by default and can grant this permission to others. For
// information about cors , see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html)
// in the Amazon S3 User Guide. Related Resources
// - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html)
// - RESTOPTIONSobject (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html)
@ -41,9 +43,9 @@ type DeleteBucketCorsInput struct {
// This member is required.
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -51,7 +53,7 @@ type DeleteBucketCorsInput struct {
func (in *DeleteBucketCorsInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type DeleteBucketCorsOutput struct {
@ -116,6 +118,9 @@ func (c *Client) addOperationDeleteBucketCorsMiddlewares(stack *middleware.Stack
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpDeleteBucketCorsValidationMiddleware(stack); err != nil {
return err
}
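
A sketch of the error-handling side, assuming the owner-mismatch case documented above surfaces as a smithy.APIError (the smithy-go package is already imported by these files); the bucket and account values are placeholders, and the AccessDenied code is an assumption about the service response.

// Hypothetical sketch: deleting a bucket's CORS configuration and inspecting
// the error when ExpectedBucketOwner does not match the real owner.
package main

import (
	"context"
	"errors"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/smithy-go"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	_, err = client.DeleteBucketCors(ctx, &s3.DeleteBucketCorsInput{
		Bucket:              aws.String("example-bucket"),
		ExpectedBucketOwner: aws.String("111122223333"),
	})
	if err != nil {
		var apiErr smithy.APIError
		if errors.As(err, &apiErr) {
			// The doc comment above states an owner mismatch returns
			// HTTP 403 Forbidden (access denied).
			log.Fatalf("DeleteBucketCors failed: %s: %s", apiErr.ErrorCode(), apiErr.ErrorMessage())
		}
		log.Fatal(err)
	}
}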

View file

@ -9,13 +9,14 @@ import (
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// This implementation of the DELETE action resets the default encryption for the
// bucket as server-side encryption with Amazon S3 managed keys (SSE-S3). For
// information about the bucket default encryption feature, see Amazon S3 Bucket
// Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
// This operation is not supported by directory buckets. This implementation of
// the DELETE action resets the default encryption for the bucket as server-side
// encryption with Amazon S3 managed keys (SSE-S3). For information about the
// bucket default encryption feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
// in the Amazon S3 User Guide. To use this operation, you must have permissions to
// perform the s3:PutEncryptionConfiguration action. The bucket owner has this
// permission by default. The bucket owner can grant this permission to others. For
@ -49,9 +50,9 @@ type DeleteBucketEncryptionInput struct {
// This member is required.
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -59,7 +60,7 @@ type DeleteBucketEncryptionInput struct {
func (in *DeleteBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type DeleteBucketEncryptionOutput struct {
@ -124,6 +125,9 @@ func (c *Client) addOperationDeleteBucketEncryptionMiddlewares(stack *middleware
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpDeleteBucketEncryptionValidationMiddleware(stack); err != nil {
return err
}

View file

@ -9,11 +9,13 @@ import (
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Deletes the S3 Intelligent-Tiering configuration from the specified bucket. The
// S3 Intelligent-Tiering storage class is designed to optimize storage costs by
// This operation is not supported by directory buckets. Deletes the S3
// Intelligent-Tiering configuration from the specified bucket. The S3
// Intelligent-Tiering storage class is designed to optimize storage costs by
// automatically moving data to the most cost-effective storage access tier,
// without performance impact or operational overhead. S3 Intelligent-Tiering
// delivers automatic cost savings in three low latency and high throughput access
@ -63,7 +65,7 @@ type DeleteBucketIntelligentTieringConfigurationInput struct {
func (in *DeleteBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type DeleteBucketIntelligentTieringConfigurationOutput struct {
@ -128,6 +130,9 @@ func (c *Client) addOperationDeleteBucketIntelligentTieringConfigurationMiddlewa
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpDeleteBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil {
return err
}

View file

@ -9,15 +9,16 @@ import (
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Deletes an inventory configuration (identified by the inventory ID) from the
// bucket. To use this operation, you must have permissions to perform the
// s3:PutInventoryConfiguration action. The bucket owner has this permission by
// default. The bucket owner can grant this permission to others. For more
// information about permissions, see Permissions Related to Bucket Subresource
// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
// This operation is not supported by directory buckets. Deletes an inventory
// configuration (identified by the inventory ID) from the bucket. To use this
// operation, you must have permissions to perform the s3:PutInventoryConfiguration
// action. The bucket owner has this permission by default. The bucket owner can
// grant this permission to others. For more information about permissions, see
// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
// . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html)
// . Operations related to DeleteBucketInventoryConfiguration include:
@ -51,9 +52,9 @@ type DeleteBucketInventoryConfigurationInput struct {
// This member is required.
Id *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -61,7 +62,7 @@ type DeleteBucketInventoryConfigurationInput struct {
func (in *DeleteBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type DeleteBucketInventoryConfigurationOutput struct {
@ -126,6 +127,9 @@ func (c *Client) addOperationDeleteBucketInventoryConfigurationMiddlewares(stack
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpDeleteBucketInventoryConfigurationValidationMiddleware(stack); err != nil {
return err
}

View file

@ -9,17 +9,19 @@ import (
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Deletes the lifecycle configuration from the specified bucket. Amazon S3
// removes all the lifecycle configuration rules in the lifecycle subresource
// associated with the bucket. Your objects never expire, and Amazon S3 no longer
// automatically deletes any objects on the basis of rules contained in the deleted
// lifecycle configuration. To use this operation, you must have permission to
// perform the s3:PutLifecycleConfiguration action. By default, the bucket owner
// has this permission and the bucket owner can grant this permission to others.
// There is usually some time lag before lifecycle configuration deletion is fully
// This operation is not supported by directory buckets. Deletes the lifecycle
// configuration from the specified bucket. Amazon S3 removes all the lifecycle
// configuration rules in the lifecycle subresource associated with the bucket.
// Your objects never expire, and Amazon S3 no longer automatically deletes any
// objects on the basis of rules contained in the deleted lifecycle configuration.
// To use this operation, you must have permission to perform the
// s3:PutLifecycleConfiguration action. By default, the bucket owner has this
// permission and the bucket owner can grant this permission to others. There is
// usually some time lag before lifecycle configuration deletion is fully
// propagated to all the Amazon S3 systems. For more information about the object
// expiration, see Elements to Describe Lifecycle Actions (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions)
// . Related actions include:
@ -47,9 +49,9 @@ type DeleteBucketLifecycleInput struct {
// This member is required.
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -57,7 +59,7 @@ type DeleteBucketLifecycleInput struct {
func (in *DeleteBucketLifecycleInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type DeleteBucketLifecycleOutput struct {
@ -122,6 +124,9 @@ func (c *Client) addOperationDeleteBucketLifecycleMiddlewares(stack *middleware.
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpDeleteBucketLifecycleValidationMiddleware(stack); err != nil {
return err
}

View file

@ -9,16 +9,18 @@ import (
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Deletes a metrics configuration for the Amazon CloudWatch request metrics
// (specified by the metrics configuration ID) from the bucket. Note that this
// doesn't include the daily storage metrics. To use this operation, you must have
// permissions to perform the s3:PutMetricsConfiguration action. The bucket owner
// has this permission by default. The bucket owner can grant this permission to
// others. For more information about permissions, see Permissions Related to
// Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
// This operation is not supported by directory buckets. Deletes a metrics
// configuration for the Amazon CloudWatch request metrics (specified by the
// metrics configuration ID) from the bucket. Note that this doesn't include the
// daily storage metrics. To use this operation, you must have permissions to
// perform the s3:PutMetricsConfiguration action. The bucket owner has this
// permission by default. The bucket owner can grant this permission to others. For
// more information about permissions, see Permissions Related to Bucket
// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
// . For information about CloudWatch request metrics for Amazon S3, see
// Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html)
@ -55,9 +57,9 @@ type DeleteBucketMetricsConfigurationInput struct {
// This member is required.
Id *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -65,7 +67,7 @@ type DeleteBucketMetricsConfigurationInput struct {
func (in *DeleteBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type DeleteBucketMetricsConfigurationOutput struct {
@ -130,6 +132,9 @@ func (c *Client) addOperationDeleteBucketMetricsConfigurationMiddlewares(stack *
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpDeleteBucketMetricsConfigurationValidationMiddleware(stack); err != nil {
return err
}

View file

@ -9,12 +9,14 @@ import (
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Removes OwnershipControls for an Amazon S3 bucket. To use this operation, you
// must have the s3:PutBucketOwnershipControls permission. For more information
// about Amazon S3 permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
// This operation is not supported by directory buckets. Removes OwnershipControls
// for an Amazon S3 bucket. To use this operation, you must have the
// s3:PutBucketOwnershipControls permission. For more information about Amazon S3
// permissions, see Specifying Permissions in a Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html)
// . For information about Amazon S3 Object Ownership, see Using Object Ownership (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html)
// . The following operations are related to DeleteBucketOwnershipControls :
// - GetBucketOwnershipControls
@ -41,9 +43,9 @@ type DeleteBucketOwnershipControlsInput struct {
// This member is required.
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -51,7 +53,7 @@ type DeleteBucketOwnershipControlsInput struct {
func (in *DeleteBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type DeleteBucketOwnershipControlsOutput struct {
@ -116,6 +118,9 @@ func (c *Client) addOperationDeleteBucketOwnershipControlsMiddlewares(stack *mid
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpDeleteBucketOwnershipControlsValidationMiddleware(stack); err != nil {
return err
}

View file

@ -9,26 +9,46 @@ import (
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// This implementation of the DELETE action uses the policy subresource to delete
// the policy of a specified bucket. If you are using an identity other than the
// root user of the Amazon Web Services account that owns the bucket, the calling
// identity must have the DeleteBucketPolicy permissions on the specified bucket
// and belong to the bucket owner's account to use this operation. If you don't
// have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied
// error. If you have the correct permissions, but you're not using an identity
// that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not
// Allowed error. To ensure that bucket owners don't inadvertently lock themselves
// out of their own buckets, the root principal in a bucket owner's Amazon Web
// Services account can perform the GetBucketPolicy , PutBucketPolicy , and
// DeleteBucketPolicy API actions, even if their bucket policy explicitly denies
// the root principal's access. Bucket owner root principals can only be blocked
// from performing these API actions by VPC endpoint policies and Amazon Web
// Services Organizations policies. For more information about bucket policies, see
// Using Bucket Policies and UserPolicies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html)
// . The following operations are related to DeleteBucketPolicy
// Deletes the policy of a specified bucket. Directory buckets - For directory
// buckets, you must make requests for this API operation to the Regional endpoint.
// These endpoints support path-style requests in the format
// https://s3express-control.region_code.amazonaws.com/bucket-name .
// Virtual-hosted-style requests aren't supported. For more information, see
// Regional and Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
// in the Amazon S3 User Guide. Permissions If you are using an identity other than
// the root user of the Amazon Web Services account that owns the bucket, the
// calling identity must both have the DeleteBucketPolicy permissions on the
// specified bucket and belong to the bucket owner's account in order to use this
// operation. If you don't have DeleteBucketPolicy permissions, Amazon S3 returns
// a 403 Access Denied error. If you have the correct permissions, but you're not
// using an identity that belongs to the bucket owner's account, Amazon S3 returns
// a 405 Method Not Allowed error. To ensure that bucket owners don't
// inadvertently lock themselves out of their own buckets, the root principal in a
// bucket owner's Amazon Web Services account can perform the GetBucketPolicy ,
// PutBucketPolicy , and DeleteBucketPolicy API actions, even if their bucket
// policy explicitly denies the root principal's access. Bucket owner root
// principals can only be blocked from performing these API actions by VPC endpoint
// policies and Amazon Web Services Organizations policies.
// - General purpose bucket permissions - The s3:DeleteBucketPolicy permission is
// required in a policy. For more information about bucket policies for general
// purpose buckets, see Using Bucket Policies and User Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html)
// in the Amazon S3 User Guide.
// - Directory bucket permissions - To grant access to this API operation, you
// must have the s3express:DeleteBucketPolicy permission in an IAM identity-based
// policy instead of a bucket policy. Cross-account access to this API operation
// isn't supported. This operation can only be performed by the Amazon Web Services
// account that owns the resource. For more information about directory bucket
// policies and permissions, see Amazon Web Services Identity and Access
// Management (IAM) for S3 Express One Zone (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam.html)
// in the Amazon S3 User Guide.
//
// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
// s3express-control.region.amazonaws.com . The following operations are related to
// DeleteBucketPolicy
// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
// - DeleteObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPolicyInput, optFns ...func(*Options)) (*DeleteBucketPolicyOutput, error) {
@ -48,14 +68,24 @@ func (c *Client) DeleteBucketPolicy(ctx context.Context, params *DeleteBucketPol
type DeleteBucketPolicyInput struct {
// The bucket name.
// The bucket name. Directory buckets - When you use this operation with a
// directory bucket, you must use path-style requests in the format
// https://s3express-control.region_code.amazonaws.com/bucket-name .
// Virtual-hosted-style requests aren't supported. Directory bucket names must be
// unique in the chosen Availability Zone. Bucket names must also follow the format
// bucket_base_name--az_id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3
// ). For information about bucket naming restrictions, see Directory bucket
// naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide
//
// This member is required.
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied). For directory buckets, this header
// is not supported in this API operation. If you specify this header, the request
// fails with the HTTP status code 501 Not Implemented .
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -63,7 +93,7 @@ type DeleteBucketPolicyInput struct {
func (in *DeleteBucketPolicyInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type DeleteBucketPolicyOutput struct {
@ -128,6 +158,9 @@ func (c *Client) addOperationDeleteBucketPolicyMiddlewares(stack *middleware.Sta
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpDeleteBucketPolicyValidationMiddleware(stack); err != nil {
return err
}
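
A sketch of calling the operation above against a general purpose bucket; per the updated doc comment, directory buckets reject ExpectedBucketOwner with 501 Not Implemented, so it is only set here for the general purpose case. Bucket and account values are placeholders.

// Hypothetical sketch: deleting a bucket policy from a general purpose bucket.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Requires the s3:DeleteBucketPolicy permission on the bucket.
	if _, err := client.DeleteBucketPolicy(ctx, &s3.DeleteBucketPolicyInput{
		Bucket:              aws.String("example-bucket"),
		ExpectedBucketOwner: aws.String("111122223333"),
	}); err != nil {
		log.Fatal(err)
	}
}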

View file

@ -9,14 +9,15 @@ import (
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Deletes the replication configuration from the bucket. To use this operation,
// you must have permissions to perform the s3:PutReplicationConfiguration action.
// The bucket owner has these permissions by default and can grant it to others.
// For more information about permissions, see Permissions Related to Bucket
// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
// This operation is not supported by directory buckets. Deletes the replication
// configuration from the bucket. To use this operation, you must have permissions
// to perform the s3:PutReplicationConfiguration action. The bucket owner has
// these permissions by default and can grant it to others. For more information
// about permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
// . It can take a while for the deletion of a replication configuration to fully
// propagate. For information about replication configuration, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html)
@ -46,9 +47,9 @@ type DeleteBucketReplicationInput struct {
// This member is required.
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -56,7 +57,7 @@ type DeleteBucketReplicationInput struct {
func (in *DeleteBucketReplicationInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type DeleteBucketReplicationOutput struct {
@ -121,6 +122,9 @@ func (c *Client) addOperationDeleteBucketReplicationMiddlewares(stack *middlewar
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpDeleteBucketReplicationValidationMiddleware(stack); err != nil {
return err
}

View file

@ -9,13 +9,15 @@ import (
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Deletes the tags from the bucket. To use this operation, you must have
// permission to perform the s3:PutBucketTagging action. By default, the bucket
// owner has this permission and can grant this permission to others. The following
// operations are related to DeleteBucketTagging :
// This operation is not supported by directory buckets. Deletes the tags from the
// bucket. To use this operation, you must have permission to perform the
// s3:PutBucketTagging action. By default, the bucket owner has this permission and
// can grant this permission to others. The following operations are related to
// DeleteBucketTagging :
// - GetBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html)
// - PutBucketTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html)
func (c *Client) DeleteBucketTagging(ctx context.Context, params *DeleteBucketTaggingInput, optFns ...func(*Options)) (*DeleteBucketTaggingOutput, error) {
@ -40,9 +42,9 @@ type DeleteBucketTaggingInput struct {
// This member is required.
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -50,7 +52,7 @@ type DeleteBucketTaggingInput struct {
func (in *DeleteBucketTaggingInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type DeleteBucketTaggingOutput struct {
@ -115,6 +117,9 @@ func (c *Client) addOperationDeleteBucketTaggingMiddlewares(stack *middleware.St
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpDeleteBucketTaggingValidationMiddleware(stack); err != nil {
return err
}

View file

@ -9,20 +9,21 @@ import (
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// This action removes the website configuration for a bucket. Amazon S3 returns a
// 200 OK response upon successfully deleting a website configuration on the
// specified bucket. You will get a 200 OK response if the website configuration
// you are trying to delete does not exist on the bucket. Amazon S3 returns a 404
// response if the bucket specified in the request does not exist. This DELETE
// action requires the S3:DeleteBucketWebsite permission. By default, only the
// bucket owner can delete the website configuration attached to a bucket. However,
// bucket owners can grant other users permission to delete the website
// configuration by writing a bucket policy granting them the
// S3:DeleteBucketWebsite permission. For more information about hosting websites,
// see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html)
// This operation is not supported by directory buckets. This action removes the
// website configuration for a bucket. Amazon S3 returns a 200 OK response upon
// successfully deleting a website configuration on the specified bucket. You will
// get a 200 OK response if the website configuration you are trying to delete
// does not exist on the bucket. Amazon S3 returns a 404 response if the bucket
// specified in the request does not exist. This DELETE action requires the
// S3:DeleteBucketWebsite permission. By default, only the bucket owner can delete
// the website configuration attached to a bucket. However, bucket owners can grant
// other users permission to delete the website configuration by writing a bucket
// policy granting them the S3:DeleteBucketWebsite permission. For more
// information about hosting websites, see Hosting Websites on Amazon S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html)
// . The following operations are related to DeleteBucketWebsite :
// - GetBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html)
// - PutBucketWebsite (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html)
@ -48,9 +49,9 @@ type DeleteBucketWebsiteInput struct {
// This member is required.
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -58,7 +59,7 @@ type DeleteBucketWebsiteInput struct {
func (in *DeleteBucketWebsiteInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type DeleteBucketWebsiteOutput struct {
@ -123,6 +124,9 @@ func (c *Client) addOperationDeleteBucketWebsiteMiddlewares(stack *middleware.St
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpDeleteBucketWebsiteValidationMiddleware(stack); err != nil {
return err
}

View file

@ -13,24 +13,69 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Removes the null version (if there is one) of an object and inserts a delete
// marker, which becomes the latest version of the object. If there isn't a null
// version, Amazon S3 does not remove any objects but will still respond that the
// command was successful. To remove a specific version, you must use the version
// Id subresource. Using this subresource permanently deletes the version. If the
// object deleted is a delete marker, Amazon S3 sets the response header,
// x-amz-delete-marker , to true. If the object you want to delete is in a bucket
// where the bucket versioning configuration is MFA Delete enabled, you must
// include the x-amz-mfa request header in the DELETE versionId request. Requests
// that include x-amz-mfa must use HTTPS. For more information about MFA Delete,
// see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html)
// . To see sample requests that use versioning, see Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete)
// . You can delete objects by explicitly calling DELETE Object or configure its
// lifecycle ( PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)
// Removes an object from a bucket. The behavior depends on the bucket's
// versioning state:
//
// - If versioning is enabled, the operation removes the null version (if there
// is one) of an object and inserts a delete marker, which becomes the latest
// version of the object. If there isn't a null version, Amazon S3 does not remove
// any objects but will still respond that the command was successful.
//
// - If versioning is suspended or not enabled, the operation permanently
// deletes the object.
//
// - Directory buckets - S3 Versioning isn't enabled or supported for directory
// buckets. For this API operation, only the null value of the version ID is
// supported by directory buckets. You can only specify null to the versionId
// query parameter in the request.
//
// - Directory buckets - For directory buckets, you must make requests for this
// API operation to the Zonal endpoint. These endpoints support
// virtual-hosted-style requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name .
// Path-style requests are not supported. For more information, see Regional and
// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
// in the Amazon S3 User Guide.
//
// To remove a specific version, you must use the versionId query parameter. Using
// this query parameter permanently deletes the version. If the object deleted is a
// delete marker, Amazon S3 sets the response header x-amz-delete-marker to true.
// If the object you want to delete is in a bucket where the bucket versioning
// configuration is MFA Delete enabled, you must include the x-amz-mfa request
// header in the DELETE versionId request. Requests that include x-amz-mfa must
// use HTTPS. For more information about MFA Delete, see Using MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html)
// in the Amazon S3 User Guide. To see sample requests that use versioning, see
// Sample Request (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete)
// . Directory buckets - MFA delete is not supported by directory buckets. You can
// delete objects by explicitly calling DELETE Object or calling (
// PutBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html)
// ) to enable Amazon S3 to remove them for you. If you want to block users or
// accounts from removing or deleting objects from your bucket, you must deny them
// the s3:DeleteObject , s3:DeleteObjectVersion , and s3:PutLifeCycleConfiguration
// actions. The following action is related to DeleteObject :
// actions. Directory buckets - S3 Lifecycle is not supported by directory buckets.
// Permissions
// - General purpose bucket permissions - The following permissions are required
// in your policies when your DeleteObjects request includes specific headers.
// - s3:DeleteObject - To delete an object from a bucket, you must always have
// the s3:DeleteObject permission.
// - s3:DeleteObjectVersion - To delete a specific version of an object from a
// versioning-enabled bucket, you must have the s3:DeleteObjectVersion permission.
// - Directory bucket permissions - To grant access to this API operation on a
// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
// API operation for session-based authorization. Specifically, you grant the
// s3express:CreateSession permission to the directory bucket in a bucket policy
// or an IAM identity-based policy. Then, you make the CreateSession API call on
// the bucket to obtain a session token. With the session token in your request
// header, you can make API requests to this operation. After the session token
// expires, you make another CreateSession API call to generate a new session
// token for use. The Amazon Web Services CLI or SDKs create the session and refresh the
// session token automatically to avoid service interruptions when a session
// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
// .
//
// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
// Bucket_name.s3express-az_id.region.amazonaws.com . The following action is
// related to DeleteObject :
// - PutObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, optFns ...func(*Options)) (*DeleteObjectOutput, error) {
if params == nil {
@ -49,16 +94,26 @@ func (c *Client) DeleteObject(ctx context.Context, params *DeleteObjectInput, op
type DeleteObjectInput struct {
// The bucket name of the bucket containing the object. When using this action
// with an access point, you must direct requests to the access point hostname. The
// access point hostname takes the form
// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
// action with an access point through the Amazon Web Services SDKs, you provide
// the access point ARN in place of the bucket name. For more information about
// access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide. When you use this action with Amazon S3 on
// Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on
// Outposts hostname takes the form
// The bucket name of the bucket containing the object. Directory buckets - When
// you use this operation with a directory bucket, you must use
// virtual-hosted-style requests in the format
// Bucket_name.s3express-az_id.region.amazonaws.com . Path-style requests are not
// supported. Directory bucket names must be unique in the chosen Availability
// Zone. Bucket names must follow the format bucket_base_name--az-id--x-s3 (for
// example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3 ). For information about bucket
// naming restrictions, see Directory bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide. Access points - When you use this action with an
// access point, you must provide the alias of the access point in place of the
// bucket name or specify the access point ARN. When using the access point ARN,
// you must direct requests to the access point hostname. The access point hostname
// takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide. Access points and Object Lambda access points are
// not supported by directory buckets. S3 on Outposts - When you use this action
// with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts
// hostname. The S3 on Outposts hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
@ -75,30 +130,34 @@ type DeleteObjectInput struct {
// Indicates whether S3 Object Lock should bypass Governance-mode restrictions to
// process this operation. To use this header, you must have the
// s3:BypassGovernanceRetention permission.
BypassGovernanceRetention bool
// s3:BypassGovernanceRetention permission. This functionality is not supported for
// directory buckets.
BypassGovernanceRetention *bool
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
// The concatenation of the authentication device's serial number, a space, and
// the value that is displayed on your authentication device. Required to
// permanently delete a versioned object if versioning is configured with MFA
// delete enabled.
// delete enabled. This functionality is not supported for directory buckets.
MFA *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
// in the Amazon S3 User Guide. This functionality is not supported for directory
// buckets.
RequestPayer types.RequestPayer
// VersionId used to reference a specific version of the object.
// Version ID used to reference a specific version of the object. For directory
// buckets in this API operation, only the null value of the version ID is
// supported.
VersionId *string
noSmithyDocumentSerde
@ -106,6 +165,7 @@ type DeleteObjectInput struct {
func (in *DeleteObjectInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.Key = in.Key
}
@ -114,15 +174,16 @@ type DeleteObjectOutput struct {
// Indicates whether the specified object version that was permanently deleted was
// (true) or was not (false) a delete marker before deletion. In a simple DELETE,
// this header indicates whether (true) or not (false) the current version of the
// object is a delete marker.
DeleteMarker bool
// object is a delete marker. This functionality is not supported for directory
// buckets.
DeleteMarker *bool
// If present, indicates that the requester was successfully charged for the
// request.
// request. This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// Returns the version ID of the delete marker created as a result of the DELETE
// operation.
// operation. This functionality is not supported for directory buckets.
VersionId *string
// Metadata pertaining to the operation's result.
@ -186,6 +247,9 @@ func (c *Client) addOperationDeleteObjectMiddlewares(stack *middleware.Stack, op
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpDeleteObjectValidationMiddleware(stack); err != nil {
return err
}
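
A sketch reflecting the pointer changes in this diff: BypassGovernanceRetention is now *bool on the input and DeleteMarker is *bool on the output, so aws.Bool and aws.ToBool are used. Bucket, key, and version ID are placeholder values.

// Hypothetical sketch: deleting a specific object version with governance
// bypass, then reading the pointer-valued response fields.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.DeleteObject(ctx, &s3.DeleteObjectInput{
		Bucket:                    aws.String("example-bucket"),
		Key:                       aws.String("path/to/object.txt"),
		VersionId:                 aws.String("example-version-id"),
		BypassGovernanceRetention: aws.Bool(true), // requires s3:BypassGovernanceRetention
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("delete marker: %v, version: %s",
		aws.ToBool(out.DeleteMarker), aws.ToString(out.VersionId))
}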

View file

@ -12,8 +12,9 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Removes the entire tag set from the specified object. For more information
// about managing object tags, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html)
// This operation is not supported by directory buckets. Removes the entire tag
// set from the specified object. For more information about managing object tags,
// see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html)
// . To use this operation, you must have permission to perform the
// s3:DeleteObjectTagging action. To delete tags of a specific object version, add
// the versionId query parameter in the request. You will need permission for the
@ -38,16 +39,18 @@ func (c *Client) DeleteObjectTagging(ctx context.Context, params *DeleteObjectTa
type DeleteObjectTaggingInput struct {
// The bucket name containing the objects from which to remove the tags. When
// using this action with an access point, you must direct requests to the access
// point hostname. The access point hostname takes the form
// The bucket name containing the objects from which to remove the tags. Access
// points - When you use this action with an access point, you must provide the
// alias of the access point in place of the bucket name or specify the access
// point ARN. When using the access point ARN, you must direct requests to the
// access point hostname. The access point hostname takes the form
// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
// action with an access point through the Amazon Web Services SDKs, you provide
// the access point ARN in place of the bucket name. For more information about
// access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide. When you use this action with Amazon S3 on
// Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on
// Outposts hostname takes the form
// in the Amazon S3 User Guide. S3 on Outposts - When you use this action with
// Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname.
// The S3 on Outposts hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
@ -62,9 +65,9 @@ type DeleteObjectTaggingInput struct {
// This member is required.
Key *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
// The versionId of the object that the tag-set will be removed from.
@ -144,6 +147,9 @@ func (c *Client) addOperationDeleteObjectTaggingMiddlewares(stack *middleware.St
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpDeleteObjectTaggingValidationMiddleware(stack); err != nil {
return err
}
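
A sketch of removing the tag set from a specific object version, as described in the doc comment above; the bucket, key, and version ID are placeholders, and the version-specific permission requirement is the one noted in that comment.

// Hypothetical sketch: deleting the tag set of one object version.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// Targeting a version requires the version-specific tagging permission
	// referenced in the operation's doc comment.
	out, err := client.DeleteObjectTagging(ctx, &s3.DeleteObjectTaggingInput{
		Bucket:    aws.String("example-bucket"),
		Key:       aws.String("path/to/object.txt"),
		VersionId: aws.String("example-version-id"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("tags removed from version %s", aws.ToString(out.VersionId))
}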

View file

@ -14,31 +14,72 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// This action enables you to delete multiple objects from a bucket using a single
// HTTP request. If you know the object keys that you want to delete, then this
// action provides a suitable alternative to sending individual delete requests,
// reducing per-request overhead. The request contains a list of up to 1000 keys
// that you want to delete. In the XML, you provide the object key names, and
// optionally, version IDs if you want to delete a specific version of the object
// from a versioning-enabled bucket. For each key, Amazon S3 performs a delete
// action and returns the result of that delete, success, or failure, in the
// response. Note that if the object specified in the request is not found, Amazon
// S3 returns the result as deleted. The action supports two modes for the
// response: verbose and quiet. By default, the action uses verbose mode in which
// the response includes the result of deletion of each key in your request. In
// quiet mode the response includes only keys where the delete action encountered
// an error. For a successful deletion, the action does not return any information
// about the delete in the response body. When performing this action on an MFA
// Delete enabled bucket, that attempts to delete any versioned objects, you must
// include an MFA token. If you do not provide one, the entire request will fail,
// even if there are non-versioned objects you are trying to delete. If you provide
// an invalid token, whether there are versioned keys in the request or not, the
// entire Multi-Object Delete request will fail. For information about MFA Delete,
// see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete)
// . Finally, the Content-MD5 header is required for all Multi-Object Delete
// requests. Amazon S3 uses the header value to ensure that your request body has
// not been altered in transit. The following operations are related to
// DeleteObjects :
// This operation enables you to delete multiple objects from a bucket using a
// single HTTP request. If you know the object keys that you want to delete, then
// this operation provides a suitable alternative to sending individual delete
// requests, reducing per-request overhead. The request can contain a list of up to
// 1000 keys that you want to delete. In the XML, you provide the object key names,
// and optionally, version IDs if you want to delete a specific version of the
// object from a versioning-enabled bucket. For each key, Amazon S3 performs a
// delete operation and returns the result of that delete, success or failure, in
// the response. Note that if the object specified in the request is not found,
// Amazon S3 returns the result as deleted.
// - Directory buckets - S3 Versioning isn't enabled or supported for directory
// buckets.
// - Directory buckets - For directory buckets, you must make requests for this
// API operation to the Zonal endpoint. These endpoints support
// virtual-hosted-style requests in the format
// https://bucket_name.s3express-az_id.region.amazonaws.com/key-name .
// Path-style requests are not supported. For more information, see Regional and
// Zonal endpoints (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-Regions-and-Zones.html)
// in the Amazon S3 User Guide.
//
// The operation supports two modes for the response: verbose and quiet. By
// default, the operation uses verbose mode in which the response includes the
// result of deletion of each key in your request. In quiet mode the response
// includes only keys where the delete operation encountered an error. For a
// successful deletion in a quiet mode, the operation does not return any
// information about the delete in the response body. When performing this action
// on an MFA Delete enabled bucket, that attempts to delete any versioned objects,
// you must include an MFA token. If you do not provide one, the entire request
// will fail, even if there are non-versioned objects you are trying to delete. If
// you provide an invalid token, whether there are versioned keys in the request or
// not, the entire Multi-Object Delete request will fail. For information about MFA
// Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete)
// in the Amazon S3 User Guide. Directory buckets - MFA delete is not supported by
// directory buckets. Permissions
// - General purpose bucket permissions - The following permissions are required
// in your policies when your DeleteObjects request includes specific headers.
// - s3:DeleteObject - To delete an object from a bucket, you must always specify
// the s3:DeleteObject permission.
// - s3:DeleteObjectVersion - To delete a specific version of an object from a
// versioning-enabled bucket, you must specify the s3:DeleteObjectVersion
// permission.
// - Directory bucket permissions - To grant access to this API operation on a
// directory bucket, we recommend that you use the CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
// API operation for session-based authorization. Specifically, you grant the
// s3express:CreateSession permission to the directory bucket in a bucket policy
// or an IAM identity-based policy. Then, you make the CreateSession API call on
// the bucket to obtain a session token. With the session token in your request
// header, you can make API requests to this operation. After the session token
// expires, you make another CreateSession API call to generate a new session
// token for use. The Amazon Web Services CLI or SDKs create the session and refresh the
// session token automatically to avoid service interruptions when a session
// expires. For more information about authorization, see CreateSession (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateSession.html)
// .
//
// Content-MD5 request header
// - General purpose bucket - The Content-MD5 request header is required for all
// Multi-Object Delete requests. Amazon S3 uses the header value to ensure that
// your request body has not been altered in transit.
// - Directory bucket - The Content-MD5 request header or an additional checksum
// request header (including x-amz-checksum-crc32 , x-amz-checksum-crc32c ,
// x-amz-checksum-sha1 , or x-amz-checksum-sha256 ) is required for all
// Multi-Object Delete requests.
//
// HTTP Host header syntax Directory buckets - The HTTP Host header syntax is
// Bucket_name.s3express-az_id.region.amazonaws.com . The following operations are
// related to DeleteObjects :
// - CreateMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html)
// - UploadPart (https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html)
// - CompleteMultipartUpload (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html)
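
A sketch of the multi-object delete described above, placed after the doc-comment hunk; the bucket and keys are placeholders, and it is assumed the SDK's checksum middleware supplies the required Content-MD5 or additional checksum header so the caller does not set it explicitly.

// Hypothetical sketch: deleting several objects in one DeleteObjects request
// and reading the per-key results (verbose mode, the default).
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
		Bucket: aws.String("example-bucket"),
		Delete: &types.Delete{
			Objects: []types.ObjectIdentifier{
				{Key: aws.String("logs/2023-12-01.log")},
				{Key: aws.String("logs/2023-12-02.log")},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range out.Deleted {
		log.Printf("deleted %s", aws.ToString(d.Key))
	}
	for _, e := range out.Errors {
		log.Printf("failed %s: %s", aws.ToString(e.Key), aws.ToString(e.Code))
	}
}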
@ -61,16 +102,26 @@ func (c *Client) DeleteObjects(ctx context.Context, params *DeleteObjectsInput,
type DeleteObjectsInput struct {
// The bucket name containing the objects to delete. When using this action with
// an access point, you must direct requests to the access point hostname. The
// access point hostname takes the form
// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
// action with an access point through the Amazon Web Services SDKs, you provide
// the access point ARN in place of the bucket name. For more information about
// access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide. When you use this action with Amazon S3 on
// Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on
// Outposts hostname takes the form
// The bucket name containing the objects to delete. Directory buckets - When you
// use this operation with a directory bucket, you must use virtual-hosted-style
// requests in the format Bucket_name.s3express-az_id.region.amazonaws.com .
// Path-style requests are not supported. Directory bucket names must be unique in
// the chosen Availability Zone. Bucket names must follow the format
// bucket_base_name--az-id--x-s3 (for example, DOC-EXAMPLE-BUCKET--usw2-az2--x-s3
// ). For information about bucket naming restrictions, see Directory bucket
// naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html)
// in the Amazon S3 User Guide. Access points - When you use this action with an
// access point, you must provide the alias of the access point in place of the
// bucket name or specify the access point ARN. When using the access point ARN,
// you must direct requests to the access point hostname. The access point hostname
// takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this action with an access point through the Amazon Web Services
// SDKs, you provide the access point ARN in place of the bucket name. For more
// information about access point ARNs, see Using access points (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide. Access points and Object Lambda access points are
// not supported by directory buckets. S3 on Outposts - When you use this action
// with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts
// hostname. The S3 on Outposts hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . When you
// use this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts access point ARN in place of the bucket name. For more
@ -87,39 +138,57 @@ type DeleteObjectsInput struct {
// Specifies whether you want to delete this object even if it has a
// Governance-type Object Lock in place. To use this header, you must have the
// s3:BypassGovernanceRetention permission.
BypassGovernanceRetention bool
// s3:BypassGovernanceRetention permission. This functionality is not supported for
// directory buckets.
BypassGovernanceRetention *bool
// Indicates the algorithm used to create the checksum for the object when using
// the SDK. This header will not provide any additional functionality if not using
// the SDK. When sending this header, there must be a corresponding x-amz-checksum
// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the
// HTTP status code 400 Bad Request . For more information, see Checking object
// integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
// ignores any provided ChecksumAlgorithm parameter. This checksum algorithm must
// be the same for all parts and it must match the checksum value supplied in the
// CreateMultipartUpload request.
// Indicates the algorithm used to create the checksum for the object when you use
// the SDK. This header will not provide any additional functionality if you don't
// use the SDK. When you send this header, there must be a corresponding
// x-amz-checksum-algorithm or x-amz-trailer header sent. Otherwise, Amazon S3
// fails the request with the HTTP status code 400 Bad Request . For the
// x-amz-checksum-algorithm header, replace algorithm with the supported
// algorithm from the following list:
// - CRC32
// - CRC32C
// - SHA1
// - SHA256
// For more information, see Checking object integrity (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide. If the individual checksum value you provide
// through x-amz-checksum-algorithm doesn't match the checksum algorithm you set
// through x-amz-sdk-checksum-algorithm , Amazon S3 ignores any provided
// ChecksumAlgorithm parameter and uses the checksum algorithm that matches the
// provided value in x-amz-checksum-algorithm . If you provide an individual
// checksum, Amazon S3 ignores any provided ChecksumAlgorithm parameter.
ChecksumAlgorithm types.ChecksumAlgorithm
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
// The concatenation of the authentication device's serial number, a space, and
// the value that is displayed on your authentication device. Required to
// permanently delete a versioned object if versioning is configured with MFA
// delete enabled.
// delete enabled. When performing the DeleteObjects operation on an MFA delete
// enabled bucket, which attempts to delete the specified versioned objects, you
// must include an MFA token. If you don't provide an MFA token, the entire request
// will fail, even if there are non-versioned objects that you are trying to
// delete. If you provide an invalid token, whether there are versioned object keys
// in the request or not, the entire Multi-Object Delete request will fail. For
// information about MFA Delete, see MFA Delete (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete)
// in the Amazon S3 User Guide. This functionality is not supported for directory
// buckets.
MFA *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
// in the Amazon S3 User Guide. This functionality is not supported for directory
// buckets.
RequestPayer types.RequestPayer
noSmithyDocumentSerde
@ -141,7 +210,7 @@ type DeleteObjectsOutput struct {
Errors []types.Error
// If present, indicates that the requester was successfully charged for the
// request.
// request. This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// Metadata pertaining to the operation's result.
@ -205,6 +274,9 @@ func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpDeleteObjectsValidationMiddleware(stack); err != nil {
return err
}
@ -241,6 +313,9 @@ func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o
if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
return err
}
if err = s3cust.AddExpressDefaultChecksumMiddleware(stack); err != nil {
return err
}
return nil
}
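
As a rough illustration of the updated DeleteObjects documentation above (this sketch is not part of the vendored diff), a Multi-Object Delete might be issued as follows. The bucket name and object keys are placeholders; the explicit ChecksumAlgorithm is what satisfies the additional-checksum requirement called out for directory buckets, and the MFA field (not shown) would only be needed on a bucket with MFA delete enabled.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	// Load region and credentials from the default chain (env vars, shared config, IMDS).
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
		Bucket: aws.String("example-bucket"), // placeholder; a directory bucket would use the bucket--az-id--x-s3 form
		Delete: &types.Delete{
			Objects: []types.ObjectIdentifier{
				{Key: aws.String("logs/2023/12/11/app.log")}, // placeholder keys
				{Key: aws.String("tmp/scratch.bin")},
			},
		},
		// Directory buckets require Content-MD5 or an additional checksum header;
		// asking the SDK to compute CRC32 satisfies that requirement.
		ChecksumAlgorithm: types.ChecksumAlgorithmCrc32,
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range out.Deleted {
		fmt.Println("deleted:", aws.ToString(d.Key))
	}
	for _, e := range out.Errors {
		fmt.Println("failed:", aws.ToString(e.Key), aws.ToString(e.Code), aws.ToString(e.Message))
	}
}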


@ -9,13 +9,15 @@ import (
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use
// this operation, you must have the s3:PutBucketPublicAccessBlock permission. For
// more information about permissions, see Permissions Related to Bucket
// Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
// This operation is not supported by directory buckets. Removes the
// PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation,
// you must have the s3:PutBucketPublicAccessBlock permission. For more
// information about permissions, see Permissions Related to Bucket Subresource
// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
// . The following operations are related to DeletePublicAccessBlock :
// - Using Amazon S3 Block Public Access (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html)
@ -44,9 +46,9 @@ type DeletePublicAccessBlockInput struct {
// This member is required.
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -54,7 +56,7 @@ type DeletePublicAccessBlockInput struct {
func (in *DeletePublicAccessBlockInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type DeletePublicAccessBlockOutput struct {
@ -119,6 +121,9 @@ func (c *Client) addOperationDeletePublicAccessBlockMiddlewares(stack *middlewar
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpDeletePublicAccessBlockValidationMiddleware(stack); err != nil {
return err
}
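
A minimal sketch of calling DeletePublicAccessBlock with the expected-owner check described above (not part of the vendored diff). The package name, function name, bucket, and account ID are placeholders, and the client is assumed to be an *s3.Client built with s3.NewFromConfig.

package s3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// removePublicAccessBlock deletes the bucket's PublicAccessBlock configuration.
// ExpectedBucketOwner is optional; if it does not match the real bucket owner,
// the request fails with 403 Forbidden as documented above.
func removePublicAccessBlock(ctx context.Context, client *s3.Client, bucket, ownerAccountID string) error {
	_, err := client.DeletePublicAccessBlock(ctx, &s3.DeletePublicAccessBlockInput{
		Bucket:              aws.String(bucket),
		ExpectedBucketOwner: aws.String(ownerAccountID),
	})
	return err
}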


@ -10,17 +10,19 @@ import (
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// This implementation of the GET action uses the accelerate subresource to return
// the Transfer Acceleration state of a bucket, which is either Enabled or
// Suspended . Amazon S3 Transfer Acceleration is a bucket-level feature that
// enables you to perform faster data transfers to and from Amazon S3. To use this
// operation, you must have permission to perform the s3:GetAccelerateConfiguration
// action. The bucket owner has this permission by default. The bucket owner can
// grant this permission to others. For more information about permissions, see
// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
// This operation is not supported by directory buckets. This implementation of
// the GET action uses the accelerate subresource to return the Transfer
// Acceleration state of a bucket, which is either Enabled or Suspended . Amazon S3
// Transfer Acceleration is a bucket-level feature that enables you to perform
// faster data transfers to and from Amazon S3. To use this operation, you must
// have permission to perform the s3:GetAccelerateConfiguration action. The bucket
// owner has this permission by default. The bucket owner can grant this permission
// to others. For more information about permissions, see Permissions Related to
// Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
// and Managing Access Permissions to your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
// in the Amazon S3 User Guide. You set the Transfer Acceleration state of an
// existing bucket to Enabled or Suspended by using the
@ -54,18 +56,19 @@ type GetBucketAccelerateConfigurationInput struct {
// This member is required.
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination Amazon S3 bucket has Requester Pays enabled, the requester
// will pay for corresponding charges to copy the object. For information about
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see Downloading Objects in
// Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
// in the Amazon S3 User Guide. This functionality is not supported for directory
// buckets.
RequestPayer types.RequestPayer
noSmithyDocumentSerde
@ -73,13 +76,13 @@ type GetBucketAccelerateConfigurationInput struct {
func (in *GetBucketAccelerateConfigurationInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type GetBucketAccelerateConfigurationOutput struct {
// If present, indicates that the requester was successfully charged for the
// request.
// request. This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// The accelerate configuration of the bucket.
@ -146,6 +149,9 @@ func (c *Client) addOperationGetBucketAccelerateConfigurationMiddlewares(stack *
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpGetBucketAccelerateConfigurationValidationMiddleware(stack); err != nil {
return err
}
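
A small sketch, under the same placeholder assumptions as the earlier ones, of reading the Transfer Acceleration state that this operation returns.

package s3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// accelerateStatus returns the bucket's Transfer Acceleration state
// ("Enabled", "Suspended", or empty if acceleration was never configured).
func accelerateStatus(ctx context.Context, client *s3.Client, bucket string) (types.BucketAccelerateStatus, error) {
	out, err := client.GetBucketAccelerateConfiguration(ctx, &s3.GetBucketAccelerateConfigurationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return "", err
	}
	return out.Status, nil
}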


@ -10,17 +10,19 @@ import (
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// This implementation of the GET action uses the acl subresource to return the
// access control list (ACL) of a bucket. To use GET to return the ACL of the
// bucket, you must have READ_ACP access to the bucket. If READ_ACP permission is
// granted to the anonymous user, you can return the ACL of the bucket without
// using an authorization header. To use this API operation against an access
// point, provide the alias of the access point in place of the bucket name. To use
// this API operation against an Object Lambda access point, provide the alias of
// the Object Lambda access point in place of the bucket name. If the Object Lambda
// This operation is not supported by directory buckets. This implementation of
// the GET action uses the acl subresource to return the access control list (ACL)
// of a bucket. To use GET to return the ACL of the bucket, you must have
// READ_ACP access to the bucket. If READ_ACP permission is granted to the
// anonymous user, you can return the ACL of the bucket without using an
// authorization header. When you use this API operation with an access point,
// provide the alias of the access point in place of the bucket name. When you use
// this API operation with an Object Lambda access point, provide the alias of the
// Object Lambda access point in place of the bucket name. If the Object Lambda
// access point alias in a request is not valid, the error code
// InvalidAccessPointAliasError is returned. For more information about
// InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
@ -49,21 +51,21 @@ func (c *Client) GetBucketAcl(ctx context.Context, params *GetBucketAclInput, op
type GetBucketAclInput struct {
// Specifies the S3 bucket whose ACL is being requested. To use this API operation
// against an access point, provide the alias of the access point in place of the
// bucket name. To use this API operation against an Object Lambda access point,
// provide the alias of the Object Lambda access point in place of the bucket name.
// If the Object Lambda access point alias in a request is not valid, the error
// code InvalidAccessPointAliasError is returned. For more information about
// InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
// Specifies the S3 bucket whose ACL is being requested. When you use this API
// operation with an access point, provide the alias of the access point in place
// of the bucket name. When you use this API operation with an Object Lambda access
// point, provide the alias of the Object Lambda access point in place of the
// bucket name. If the Object Lambda access point alias in a request is not valid,
// the error code InvalidAccessPointAliasError is returned. For more information
// about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
// .
//
// This member is required.
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -71,7 +73,7 @@ type GetBucketAclInput struct {
func (in *GetBucketAclInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type GetBucketAclOutput struct {
@ -143,6 +145,9 @@ func (c *Client) addOperationGetBucketAclMiddlewares(stack *middleware.Stack, op
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpGetBucketAclValidationMiddleware(stack); err != nil {
return err
}
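
A hypothetical sketch of reading a bucket ACL with this operation; as the doc comment above notes, an access point alias could be passed in place of the bucket name. Function and package names are placeholders.

package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printBucketACL lists the owner and the grants attached to the bucket's ACL.
func printBucketACL(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketAcl(ctx, &s3.GetBucketAclInput{Bucket: aws.String(bucket)})
	if err != nil {
		return err
	}
	if out.Owner != nil {
		fmt.Println("owner:", aws.ToString(out.Owner.ID))
	}
	for _, g := range out.Grants {
		grantee := ""
		if g.Grantee != nil {
			// Either a canonical user ID or a group URI is set, depending on grantee type.
			grantee = aws.ToString(g.Grantee.ID) + aws.ToString(g.Grantee.URI)
		}
		fmt.Println("grant:", g.Permission, grantee)
	}
	return nil
}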


@ -10,15 +10,17 @@ import (
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// This implementation of the GET action returns an analytics configuration
// (identified by the analytics configuration ID) from the bucket. To use this
// operation, you must have permissions to perform the s3:GetAnalyticsConfiguration
// action. The bucket owner has this permission by default. The bucket owner can
// grant this permission to others. For more information about permissions, see
// Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
// This operation is not supported by directory buckets. This implementation of
// the GET action returns an analytics configuration (identified by the analytics
// configuration ID) from the bucket. To use this operation, you must have
// permissions to perform the s3:GetAnalyticsConfiguration action. The bucket
// owner has this permission by default. The bucket owner can grant this permission
// to others. For more information about permissions, see Permissions Related to
// Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
// in the Amazon S3 User Guide. For information about the Amazon S3 analytics feature,
// see Amazon S3 Analytics Storage Class Analysis (https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html)
@ -54,9 +56,9 @@ type GetBucketAnalyticsConfigurationInput struct {
// This member is required.
Id *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -64,7 +66,7 @@ type GetBucketAnalyticsConfigurationInput struct {
func (in *GetBucketAnalyticsConfigurationInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type GetBucketAnalyticsConfigurationOutput struct {
@ -133,6 +135,9 @@ func (c *Client) addOperationGetBucketAnalyticsConfigurationMiddlewares(stack *m
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpGetBucketAnalyticsConfigurationValidationMiddleware(stack); err != nil {
return err
}
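
A brief sketch of fetching a single analytics configuration by ID, mirroring the required Bucket and Id members shown above; the ID value is purely illustrative.

package s3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// analyticsConfig fetches one analytics configuration by its ID
// (for example "storage-class-report").
func analyticsConfig(ctx context.Context, client *s3.Client, bucket, id string) (*types.AnalyticsConfiguration, error) {
	out, err := client.GetBucketAnalyticsConfiguration(ctx, &s3.GetBucketAnalyticsConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(id),
	})
	if err != nil {
		return nil, err
	}
	return out.AnalyticsConfiguration, nil
}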


@ -10,19 +10,21 @@ import (
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Returns the Cross-Origin Resource Sharing (CORS) configuration information set
// for the bucket. To use this operation, you must have permission to perform the
// s3:GetBucketCORS action. By default, the bucket owner has this permission and
// can grant it to others. To use this API operation against an access point,
// provide the alias of the access point in place of the bucket name. To use this
// API operation against an Object Lambda access point, provide the alias of the
// Object Lambda access point in place of the bucket name. If the Object Lambda
// access point alias in a request is not valid, the error code
// InvalidAccessPointAliasError is returned. For more information about
// InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
// This operation is not supported by directory buckets. Returns the Cross-Origin
// Resource Sharing (CORS) configuration information set for the bucket. To use
// this operation, you must have permission to perform the s3:GetBucketCORS
// action. By default, the bucket owner has this permission and can grant it to
// others. When you use this API operation with an access point, provide the alias
// of the access point in place of the bucket name. When you use this API operation
// with an Object Lambda access point, provide the alias of the Object Lambda
// access point in place of the bucket name. If the Object Lambda access point
// alias in a request is not valid, the error code InvalidAccessPointAliasError is
// returned. For more information about InvalidAccessPointAliasError , see List of
// Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
// . For more information about CORS, see Enabling Cross-Origin Resource Sharing (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html)
// . The following operations are related to GetBucketCors :
// - PutBucketCors (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html)
@ -44,21 +46,21 @@ func (c *Client) GetBucketCors(ctx context.Context, params *GetBucketCorsInput,
type GetBucketCorsInput struct {
// The bucket name for which to get the cors configuration. To use this API
// operation against an access point, provide the alias of the access point in
// place of the bucket name. To use this API operation against an Object Lambda
// access point, provide the alias of the Object Lambda access point in place of
// the bucket name. If the Object Lambda access point alias in a request is not
// valid, the error code InvalidAccessPointAliasError is returned. For more
// information about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
// The bucket name for which to get the cors configuration. When you use this API
// operation with an access point, provide the alias of the access point in place
// of the bucket name. When you use this API operation with an Object Lambda access
// point, provide the alias of the Object Lambda access point in place of the
// bucket name. If the Object Lambda access point alias in a request is not valid,
// the error code InvalidAccessPointAliasError is returned. For more information
// about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
// .
//
// This member is required.
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -66,7 +68,7 @@ type GetBucketCorsInput struct {
func (in *GetBucketCorsInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type GetBucketCorsOutput struct {
@ -136,6 +138,9 @@ func (c *Client) addOperationGetBucketCorsMiddlewares(stack *middleware.Stack, o
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpGetBucketCorsValidationMiddleware(stack); err != nil {
return err
}
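
A placeholder sketch of inspecting the CORS rules that GetBucketCors returns; only the allowed origins and methods are printed here.

package s3sketch

import (
	"context"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printCORSRules dumps the allowed origins and methods of every CORS rule on the bucket.
func printCORSRules(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketCors(ctx, &s3.GetBucketCorsInput{Bucket: aws.String(bucket)})
	if err != nil {
		return err
	}
	for i, rule := range out.CORSRules {
		fmt.Printf("rule %d: origins=%s methods=%s\n",
			i, strings.Join(rule.AllowedOrigins, ","), strings.Join(rule.AllowedMethods, ","))
	}
	return nil
}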


@ -10,14 +10,15 @@ import (
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Returns the default encryption configuration for an Amazon S3 bucket. By
// default, all buckets have a default encryption configuration that uses
// server-side encryption with Amazon S3 managed keys (SSE-S3). For information
// about the bucket default encryption feature, see Amazon S3 Bucket Default
// Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
// This operation is not supported by directory buckets. Returns the default
// encryption configuration for an Amazon S3 bucket. By default, all buckets have a
// default encryption configuration that uses server-side encryption with Amazon S3
// managed keys (SSE-S3). For information about the bucket default encryption
// feature, see Amazon S3 Bucket Default Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
// in the Amazon S3 User Guide. To use this operation, you must have permission to
// perform the s3:GetEncryptionConfiguration action. The bucket owner has this
// permission by default. The bucket owner can grant this permission to others. For
@ -50,9 +51,9 @@ type GetBucketEncryptionInput struct {
// This member is required.
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -60,7 +61,7 @@ type GetBucketEncryptionInput struct {
func (in *GetBucketEncryptionInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type GetBucketEncryptionOutput struct {
@ -129,6 +130,9 @@ func (c *Client) addOperationGetBucketEncryptionMiddlewares(stack *middleware.St
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpGetBucketEncryptionValidationMiddleware(stack); err != nil {
return err
}
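
A sketch of reading the default-encryption configuration described above; since every general purpose bucket now has one, the call normally returns at least an SSE-S3 rule. Names are placeholders.

package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printDefaultEncryption shows each default-encryption rule (SSE-S3, or SSE-KMS with its key).
func printDefaultEncryption(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketEncryption(ctx, &s3.GetBucketEncryptionInput{Bucket: aws.String(bucket)})
	if err != nil {
		return err
	}
	if out.ServerSideEncryptionConfiguration == nil {
		return nil
	}
	for _, rule := range out.ServerSideEncryptionConfiguration.Rules {
		if def := rule.ApplyServerSideEncryptionByDefault; def != nil {
			fmt.Println("algorithm:", def.SSEAlgorithm, "kms key:", aws.ToString(def.KMSMasterKeyID))
		}
	}
	return nil
}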


@ -10,10 +10,12 @@ import (
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Gets the S3 Intelligent-Tiering configuration from the specified bucket. The S3
// This operation is not supported by directory buckets. Gets the S3
// Intelligent-Tiering configuration from the specified bucket. The S3
// Intelligent-Tiering storage class is designed to optimize storage costs by
// automatically moving data to the most cost-effective storage access tier,
// without performance impact or operational overhead. S3 Intelligent-Tiering
@ -64,7 +66,7 @@ type GetBucketIntelligentTieringConfigurationInput struct {
func (in *GetBucketIntelligentTieringConfigurationInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type GetBucketIntelligentTieringConfigurationOutput struct {
@ -133,6 +135,9 @@ func (c *Client) addOperationGetBucketIntelligentTieringConfigurationMiddlewares
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpGetBucketIntelligentTieringConfigurationValidationMiddleware(stack); err != nil {
return err
}
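
A short, hypothetical sketch of fetching one S3 Intelligent-Tiering configuration by ID and printing its status and archive tiers.

package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printIntelligentTiering shows the status and archive tiers of one
// S3 Intelligent-Tiering configuration, identified by its ID.
func printIntelligentTiering(ctx context.Context, client *s3.Client, bucket, id string) error {
	out, err := client.GetBucketIntelligentTieringConfiguration(ctx, &s3.GetBucketIntelligentTieringConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(id),
	})
	if err != nil {
		return err
	}
	cfg := out.IntelligentTieringConfiguration
	if cfg == nil {
		return nil
	}
	fmt.Println("status:", cfg.Status)
	for _, t := range cfg.Tierings {
		fmt.Println("archive tier:", t.AccessTier)
	}
	return nil
}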


@ -10,13 +10,15 @@ import (
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Returns an inventory configuration (identified by the inventory configuration
// ID) from the bucket. To use this operation, you must have permissions to perform
// the s3:GetInventoryConfiguration action. The bucket owner has this permission
// by default and can grant this permission to others. For more information about
// This operation is not supported by directory buckets. Returns an inventory
// configuration (identified by the inventory configuration ID) from the bucket. To
// use this operation, you must have permissions to perform the
// s3:GetInventoryConfiguration action. The bucket owner has this permission by
// default and can grant this permission to others. For more information about
// permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
// . For information about the Amazon S3 inventory feature, see Amazon S3 Inventory (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html)
@ -51,9 +53,9 @@ type GetBucketInventoryConfigurationInput struct {
// This member is required.
Id *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -61,7 +63,7 @@ type GetBucketInventoryConfigurationInput struct {
func (in *GetBucketInventoryConfigurationInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type GetBucketInventoryConfigurationOutput struct {
@ -130,6 +132,9 @@ func (c *Client) addOperationGetBucketInventoryConfigurationMiddlewares(stack *m
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpGetBucketInventoryConfigurationValidationMiddleware(stack); err != nil {
return err
}
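
A placeholder sketch of retrieving one inventory configuration by ID, as the doc comment above describes, and printing its schedule frequency.

package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printInventoryConfig fetches one inventory configuration by ID and prints
// its schedule frequency (Daily or Weekly).
func printInventoryConfig(ctx context.Context, client *s3.Client, bucket, id string) error {
	out, err := client.GetBucketInventoryConfiguration(ctx, &s3.GetBucketInventoryConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(id),
	})
	if err != nil {
		return err
	}
	if cfg := out.InventoryConfiguration; cfg != nil && cfg.Schedule != nil {
		fmt.Println("inventory", aws.ToString(cfg.Id), "runs", cfg.Schedule.Frequency)
	}
	return nil
}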


@ -10,16 +10,17 @@ import (
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Bucket lifecycle configuration now supports specifying a lifecycle rule using
// an object key name prefix, one or more object tags, or a combination of both.
// Accordingly, this section describes the latest API. The response describes the
// new filter element that you can use to specify a filter to select a subset of
// objects to which the rule applies. If you are using a previous version of the
// lifecycle configuration, it still works. For the earlier action, see
// GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html)
// This operation is not supported by directory buckets. Bucket lifecycle
// configuration now supports specifying a lifecycle rule using an object key name
// prefix, one or more object tags, or a combination of both. Accordingly, this
// section describes the latest API. The response describes the new filter element
// that you can use to specify a filter to select a subset of objects to which the
// rule applies. If you are using a previous version of the lifecycle
// configuration, it still works. For the earlier action, see GetBucketLifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html)
// . Returns the lifecycle configuration information set on the bucket. For
// information about lifecycle configuration, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
// . To use this operation, you must have permission to perform the
@ -60,9 +61,9 @@ type GetBucketLifecycleConfigurationInput struct {
// This member is required.
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -70,7 +71,7 @@ type GetBucketLifecycleConfigurationInput struct {
func (in *GetBucketLifecycleConfigurationInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type GetBucketLifecycleConfigurationOutput struct {
@ -139,6 +140,9 @@ func (c *Client) addOperationGetBucketLifecycleConfigurationMiddlewares(stack *m
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpGetBucketLifecycleConfigurationValidationMiddleware(stack); err != nil {
return err
}
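
A sketch, with placeholder names, of listing the lifecycle rules that this operation returns; only each rule's ID and status are printed here.

package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printLifecycleRules lists the ID and status of every lifecycle rule on the bucket.
func printLifecycleRules(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketLifecycleConfiguration(ctx, &s3.GetBucketLifecycleConfigurationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	for _, rule := range out.Rules {
		fmt.Println("rule:", aws.ToString(rule.ID), "status:", rule.Status)
	}
	return nil
}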


@ -15,17 +15,19 @@ import (
smithyxml "github.com/aws/smithy-go/encoding/xml"
smithyio "github.com/aws/smithy-go/io"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
"io"
)
// Returns the Region the bucket resides in. You set the bucket's Region using the
// LocationConstraint request parameter in a CreateBucket request. For more
// information, see CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
// . To use this API operation against an access point, provide the alias of the
// access point in place of the bucket name. To use this API operation against an
// Object Lambda access point, provide the alias of the Object Lambda access point
// in place of the bucket name. If the Object Lambda access point alias in a
// This operation is not supported by directory buckets. Returns the Region the
// bucket resides in. You set the bucket's Region using the LocationConstraint
// request parameter in a CreateBucket request. For more information, see
// CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
// . When you use this API operation with an access point, provide the alias of the
// access point in place of the bucket name. When you use this API operation with
// an Object Lambda access point, provide the alias of the Object Lambda access
// point in place of the bucket name. If the Object Lambda access point alias in a
// request is not valid, the error code InvalidAccessPointAliasError is returned.
// For more information about InvalidAccessPointAliasError , see List of Error
// Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
@ -52,21 +54,21 @@ func (c *Client) GetBucketLocation(ctx context.Context, params *GetBucketLocatio
type GetBucketLocationInput struct {
// The name of the bucket for which to get the location. To use this API operation
// against an access point, provide the alias of the access point in place of the
// bucket name. To use this API operation against an Object Lambda access point,
// provide the alias of the Object Lambda access point in place of the bucket name.
// If the Object Lambda access point alias in a request is not valid, the error
// code InvalidAccessPointAliasError is returned. For more information about
// InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
// The name of the bucket for which to get the location. When you use this API
// operation with an access point, provide the alias of the access point in place
// of the bucket name. When you use this API operation with an Object Lambda access
// point, provide the alias of the Object Lambda access point in place of the
// bucket name. If the Object Lambda access point alias in a request is not valid,
// the error code InvalidAccessPointAliasError is returned. For more information
// about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
// .
//
// This member is required.
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -74,7 +76,7 @@ type GetBucketLocationInput struct {
func (in *GetBucketLocationInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type GetBucketLocationOutput struct {
@ -148,6 +150,9 @@ func (c *Client) addOperationGetBucketLocationMiddlewares(stack *middleware.Stac
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpGetBucketLocationValidationMiddleware(stack); err != nil {
return err
}
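
A sketch of resolving a bucket's Region with GetBucketLocation. One wrinkle worth showing: an empty LocationConstraint in the response means the bucket is in us-east-1.

package s3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// bucketRegion resolves the Region a bucket lives in; an empty
// LocationConstraint in the response denotes us-east-1.
func bucketRegion(ctx context.Context, client *s3.Client, bucket string) (string, error) {
	out, err := client.GetBucketLocation(ctx, &s3.GetBucketLocationInput{Bucket: aws.String(bucket)})
	if err != nil {
		return "", err
	}
	if out.LocationConstraint == "" {
		return "us-east-1", nil
	}
	return string(out.LocationConstraint), nil
}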


@ -10,12 +10,13 @@ import (
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Returns the logging status of a bucket and the permissions users have to view
// and modify that status. The following operations are related to GetBucketLogging
// :
// This operation is not supported by directory buckets. Returns the logging
// status of a bucket and the permissions users have to view and modify that
// status. The following operations are related to GetBucketLogging :
// - CreateBucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html)
// - PutBucketLogging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html)
func (c *Client) GetBucketLogging(ctx context.Context, params *GetBucketLoggingInput, optFns ...func(*Options)) (*GetBucketLoggingOutput, error) {
@ -40,9 +41,9 @@ type GetBucketLoggingInput struct {
// This member is required.
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -50,7 +51,7 @@ type GetBucketLoggingInput struct {
func (in *GetBucketLoggingInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type GetBucketLoggingOutput struct {
@ -121,6 +122,9 @@ func (c *Client) addOperationGetBucketLoggingMiddlewares(stack *middleware.Stack
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpGetBucketLoggingValidationMiddleware(stack); err != nil {
return err
}
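
A minimal sketch of checking the server access logging status described above; a nil LoggingEnabled in the response means logging is off.

package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printLoggingTarget reports where server access logs are delivered,
// if logging is enabled on the bucket at all.
func printLoggingTarget(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketLogging(ctx, &s3.GetBucketLoggingInput{Bucket: aws.String(bucket)})
	if err != nil {
		return err
	}
	if out.LoggingEnabled == nil {
		fmt.Println("server access logging is disabled")
		return nil
	}
	fmt.Println("logs go to", aws.ToString(out.LoggingEnabled.TargetBucket),
		"with prefix", aws.ToString(out.LoggingEnabled.TargetPrefix))
	return nil
}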


@ -10,16 +10,17 @@ import (
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Gets a metrics configuration (specified by the metrics configuration ID) from
// the bucket. Note that this doesn't include the daily storage metrics. To use
// this operation, you must have permissions to perform the
// s3:GetMetricsConfiguration action. The bucket owner has this permission by
// default. The bucket owner can grant this permission to others. For more
// information about permissions, see Permissions Related to Bucket Subresource
// Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
// This operation is not supported by directory buckets. Gets a metrics
// configuration (specified by the metrics configuration ID) from the bucket. Note
// that this doesn't include the daily storage metrics. To use this operation, you
// must have permissions to perform the s3:GetMetricsConfiguration action. The
// bucket owner has this permission by default. The bucket owner can grant this
// permission to others. For more information about permissions, see Permissions
// Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources)
// and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html)
// . For information about CloudWatch request metrics for Amazon S3, see
// Monitoring Metrics with Amazon CloudWatch (https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html)
@ -56,9 +57,9 @@ type GetBucketMetricsConfigurationInput struct {
// This member is required.
Id *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -66,7 +67,7 @@ type GetBucketMetricsConfigurationInput struct {
func (in *GetBucketMetricsConfigurationInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type GetBucketMetricsConfigurationOutput struct {
@ -135,6 +136,9 @@ func (c *Client) addOperationGetBucketMetricsConfigurationMiddlewares(stack *mid
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpGetBucketMetricsConfigurationValidationMiddleware(stack); err != nil {
return err
}
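
A brief sketch of fetching one request-metrics configuration by ID, following the same Bucket/Id pattern as the other configuration getters above; the ID is illustrative.

package s3sketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// metricsConfig fetches one CloudWatch request-metrics configuration by its ID;
// a nil Filter on the result means the configuration covers the whole bucket.
func metricsConfig(ctx context.Context, client *s3.Client, bucket, id string) (*types.MetricsConfiguration, error) {
	out, err := client.GetBucketMetricsConfiguration(ctx, &s3.GetBucketMetricsConfigurationInput{
		Bucket: aws.String(bucket),
		Id:     aws.String(id),
	})
	if err != nil {
		return nil, err
	}
	return out.MetricsConfiguration, nil
}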


@ -10,21 +10,23 @@ import (
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Returns the notification configuration of a bucket. If notifications are not
// enabled on the bucket, the action returns an empty NotificationConfiguration
// element. By default, you must be the bucket owner to read the notification
// configuration of a bucket. However, the bucket owner can use a bucket policy to
// grant permission to other users to read this configuration with the
// s3:GetBucketNotification permission. To use this API operation against an access
// point, provide the alias of the access point in place of the bucket name. To use
// this API operation against an Object Lambda access point, provide the alias of
// the Object Lambda access point in place of the bucket name. If the Object Lambda
// access point alias in a request is not valid, the error code
// InvalidAccessPointAliasError is returned. For more information about
// InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
// This operation is not supported by directory buckets. Returns the notification
// configuration of a bucket. If notifications are not enabled on the bucket, the
// action returns an empty NotificationConfiguration element. By default, you must
// be the bucket owner to read the notification configuration of a bucket. However,
// the bucket owner can use a bucket policy to grant permission to other users to
// read this configuration with the s3:GetBucketNotification permission. When you
// use this API operation with an access point, provide the alias of the access
// point in place of the bucket name. When you use this API operation with an
// Object Lambda access point, provide the alias of the Object Lambda access point
// in place of the bucket name. If the Object Lambda access point alias in a
// request is not valid, the error code InvalidAccessPointAliasError is returned.
// For more information about InvalidAccessPointAliasError , see List of Error
// Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
// . For more information about setting and reading the notification configuration
// on a bucket, see Setting Up Notification of Bucket Events (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
// . For more information about bucket policies, see Using Bucket Policies (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html)
@ -47,21 +49,22 @@ func (c *Client) GetBucketNotificationConfiguration(ctx context.Context, params
type GetBucketNotificationConfigurationInput struct {
// The name of the bucket for which to get the notification configuration. To use
// this API operation against an access point, provide the alias of the access
// point in place of the bucket name. To use this API operation against an Object
// Lambda access point, provide the alias of the Object Lambda access point in
// place of the bucket name. If the Object Lambda access point alias in a request
// is not valid, the error code InvalidAccessPointAliasError is returned. For more
// information about InvalidAccessPointAliasError , see List of Error Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
// The name of the bucket for which to get the notification configuration. When
// you use this API operation with an access point, provide the alias of the access
// point in place of the bucket name. When you use this API operation with an
// Object Lambda access point, provide the alias of the Object Lambda access point
// in place of the bucket name. If the Object Lambda access point alias in a
// request is not valid, the error code InvalidAccessPointAliasError is returned.
// For more information about InvalidAccessPointAliasError , see List of Error
// Codes (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ErrorCodeList)
// .
//
// This member is required.
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -69,7 +72,7 @@ type GetBucketNotificationConfigurationInput struct {
func (in *GetBucketNotificationConfigurationInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
// A container for specifying the notification configuration of the bucket. If
@ -152,6 +155,9 @@ func (c *Client) addOperationGetBucketNotificationConfigurationMiddlewares(stack
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpGetBucketNotificationConfigurationValidationMiddleware(stack); err != nil {
return err
}
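
A placeholder sketch of summarizing the notification configuration this operation returns; as noted above, a bucket with no notifications simply yields an empty configuration (empty slices here).

package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printNotificationTargets summarizes how many SNS, SQS, and Lambda targets are
// configured, and whether EventBridge delivery is enabled.
func printNotificationTargets(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketNotificationConfiguration(ctx, &s3.GetBucketNotificationConfigurationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	fmt.Printf("topics=%d queues=%d lambdas=%d eventbridge=%v\n",
		len(out.TopicConfigurations), len(out.QueueConfigurations),
		len(out.LambdaFunctionConfigurations), out.EventBridgeConfiguration != nil)
	return nil
}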


@ -10,12 +10,14 @@ import (
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you
// must have the s3:GetBucketOwnershipControls permission. For more information
// about Amazon S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html)
// This operation is not supported by directory buckets. Retrieves
// OwnershipControls for an Amazon S3 bucket. To use this operation, you must have
// the s3:GetBucketOwnershipControls permission. For more information about Amazon
// S3 permissions, see Specifying permissions in a policy (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html)
// . For information about Amazon S3 Object Ownership, see Using Object Ownership (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
// . The following operations are related to GetBucketOwnershipControls :
// - PutBucketOwnershipControls
@ -42,9 +44,9 @@ type GetBucketOwnershipControlsInput struct {
// This member is required.
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
// The account ID of the expected bucket owner. If the account ID that you provide
// does not match the actual owner of the bucket, the request fails with the HTTP
// status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -52,7 +54,7 @@ type GetBucketOwnershipControlsInput struct {
func (in *GetBucketOwnershipControlsInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.UseS3ExpressControlEndpoint = ptr.Bool(true)
}
type GetBucketOwnershipControlsOutput struct {
@ -122,6 +124,9 @@ func (c *Client) addOperationGetBucketOwnershipControlsMiddlewares(stack *middle
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addOpGetBucketOwnershipControlsValidationMiddleware(stack); err != nil {
return err
}
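
Finally, a sketch of reading the Object Ownership setting via GetBucketOwnershipControls; package and function names remain placeholders.

package s3sketch

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printObjectOwnership prints the ObjectOwnership setting of each ownership rule
// (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter).
func printObjectOwnership(ctx context.Context, client *s3.Client, bucket string) error {
	out, err := client.GetBucketOwnershipControls(ctx, &s3.GetBucketOwnershipControlsInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		return err
	}
	if out.OwnershipControls == nil {
		return nil
	}
	for _, rule := range out.OwnershipControls.Rules {
		fmt.Println("object ownership:", rule.ObjectOwnership)
	}
	return nil
}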

Some files were not shown because too many files have changed in this diff.