vendor: make vendor-update

Aliaksandr Valialkin 2022-10-01 17:20:01 +03:00
parent 87c77727e4
commit 366f04001b
GPG key ID: A72BEC6CD3D0DED1
211 changed files with 8576 additions and 932 deletions

go.mod

@@ -12,9 +12,9 @@ require (
github.com/VictoriaMetrics/metrics v1.22.2
github.com/VictoriaMetrics/metricsql v0.45.0
github.com/aws/aws-sdk-go-v2 v1.16.16
github.com/aws/aws-sdk-go-v2/config v1.17.7
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.7.1
github.com/aws/aws-sdk-go-v2/service/s3 v1.19.0
github.com/aws/aws-sdk-go-v2/config v1.17.8
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.34
github.com/aws/aws-sdk-go-v2/service/s3 v1.27.11
github.com/cespare/xxhash/v2 v2.1.2
github.com/cheggaaa/pb/v3 v3.1.0
github.com/golang/snappy v0.0.4
@@ -22,35 +22,37 @@ require (
github.com/influxdata/influxdb v1.10.0
github.com/klauspost/compress v1.15.11
github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
github.com/urfave/cli/v2 v2.16.3
github.com/urfave/cli/v2 v2.17.1
github.com/valyala/fastjson v1.6.3
github.com/valyala/fastrand v1.1.0
github.com/valyala/fasttemplate v1.2.1
github.com/valyala/gozstd v1.17.0
github.com/valyala/quicktemplate v1.7.0
golang.org/x/net v0.0.0-20220923203811-8be639271d50
golang.org/x/net v0.0.0-20220930213112-107f3e3c3b0b
golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1
golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8
google.golang.org/api v0.97.0
golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec
google.golang.org/api v0.98.0
gopkg.in/yaml.v2 v2.4.0
)
require (
cloud.google.com/go v0.104.0 // indirect
cloud.google.com/go/compute v1.10.0 // indirect
cloud.google.com/go/iam v0.4.0 // indirect
cloud.google.com/go/iam v0.5.0 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.12.20 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.8 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.12.21 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.5.0 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.14 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.9 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.18 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.9.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.17 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.11.23 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.5 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.6 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.16.19 // indirect
github.com/aws/smithy-go v1.13.3 // indirect
github.com/beorn7/perks v1.0.1 // indirect
@@ -63,7 +65,7 @@ require (
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.2.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.16 // indirect
@@ -83,11 +85,11 @@ require (
go.opencensus.io v0.23.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 // indirect
golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7 // indirect
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220923205249-dd2d53f1fffc // indirect
google.golang.org/genproto v0.0.0-20220930163606-c98284e70a91 // indirect
google.golang.org/grpc v1.49.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
)

go.sum

@@ -50,8 +50,8 @@ cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOt
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
cloud.google.com/go/iam v0.4.0 h1:YBYU00SCDzZJdHqVc4I5d6lsklcYIjQZa1YmEz4jlSE=
cloud.google.com/go/iam v0.4.0/go.mod h1:cbaZxyScUhxl7ZAkNWiALgihfP75wS/fUsVNaa1r3vA=
cloud.google.com/go/iam v0.5.0 h1:fz9X5zyTWBmamZsqvqZqD7khbifcZF/q+Z1J8pfhIUg=
cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -150,49 +150,42 @@ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/aws/aws-sdk-go-v2 v1.11.0/go.mod h1:SQfA+m2ltnu1cA0soUkj4dRSsmITiVQUJvBIZjzfPyQ=
github.com/aws/aws-sdk-go-v2 v1.16.16 h1:M1fj4FE2lB4NzRb9Y0xdWsn2P0+2UHVxwKyOa4YJNjk=
github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0 h1:yVUAwvJC/0WNPbyl0nA3j1L6CW1CN8wBubCRqtG7JLI=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.0.0/go.mod h1:Xn6sxgRuIDflLRJFj5Ev7UxABIkNbccFPV/p8itDReM=
github.com/aws/aws-sdk-go-v2/config v1.10.1/go.mod h1:auIv5pIIn3jIBHNRcVQcsczn6Pfa6Dyv80Fai0ueoJU=
github.com/aws/aws-sdk-go-v2/config v1.17.7 h1:odVM52tFHhpqZBKNjVW5h+Zt1tKHbhdTQRb+0WHrNtw=
github.com/aws/aws-sdk-go-v2/config v1.17.7/go.mod h1:dN2gja/QXxFF15hQreyrqYhLBaQo1d9ZKe/v/uplQoI=
github.com/aws/aws-sdk-go-v2/credentials v1.6.1/go.mod h1:QyvQk1IYTqBWSi1T6UgT/W8DMxBVa5pVuLFSRLLhGf8=
github.com/aws/aws-sdk-go-v2/credentials v1.12.20 h1:9+ZhlDY7N9dPnUmf7CDfW9In4sW5Ff3bh7oy4DzS1IE=
github.com/aws/aws-sdk-go-v2/credentials v1.12.20/go.mod h1:UKY5HyIux08bbNA7Blv4PcXQ8cTkGh7ghHMFklaviR4=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.8.0/go.mod h1:5E1J3/TTYy6z909QNR0QnXGBpfESYGDqd3O0zqONghU=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.8 h1:tcFliCWne+zOuUfKNRn8JdFBuWPDuISDH08wD2ULkhk=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.8/go.mod h1:JTnlBSot91steJeti4ryyu/tLd4Sk84O5W22L7O2EQU=
github.com/aws/aws-sdk-go-v2/config v1.17.8 h1:b9LGqNnOdg9vR4Q43tBTVWk4J6F+W774MSchvKJsqnE=
github.com/aws/aws-sdk-go-v2/config v1.17.8/go.mod h1:UkCI3kb0sCdvtjiXYiU4Zx5h07BOpgBTtkPu/49r+kA=
github.com/aws/aws-sdk-go-v2/credentials v1.12.21 h1:4tjlyCD0hRGNQivh5dN8hbP30qQhMLBE/FgQR1vHHWM=
github.com/aws/aws-sdk-go-v2/credentials v1.12.21/go.mod h1:O+4XyAt4e+oBAoIwNUYkRg3CVMscaIJdmZBOcPgJ8D8=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17 h1:r08j4sbZu/RVi+BNxkBJwPMUYY3P8mgSDuKkZ/ZN1lE=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17/go.mod h1:yIkQcCDYNsZfXpd5UX2Cy+sWA1jPgIhGTw9cOBzfVnQ=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.7.1 h1:p9Dys1g2YdaqMalnp6AwCA+tpMMdJNGw5YYKP/u3sUk=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.7.1/go.mod h1:wN/mvkow08GauDwJ70jnzJ1e+hE+Q3Q7TwpYLXOe9oI=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.0/go.mod h1:NO3Q5ZTTQtO2xIg2+xTXYDiT7knSejfeDm7WGDaOo0U=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.34 h1:1PNtaCM+2ruo1dfYL2RweUdtbuPvinjAejjNcPa/RQY=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.34/go.mod h1:+Six+CXNHYllXam32j+YW8ixk82+am345ei89kEz8p4=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23 h1:s4g/wnzMf+qepSNgTvaQQHNxyMLKSawNhKCPNy++2xY=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23/go.mod h1:2DFxAQ9pfIRy0imBCJv+vZ2X6RKxves6fbnEuSry6b4=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.0.0/go.mod h1:anlUzBoEWglcUxUQwZA7HQOEVEnQALVZsizAapB2hq8=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17 h1:/K482T5A3623WJgWT8w1yRAFK4RzGzEl7y39yhtn9eA=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17/go.mod h1:pRwaTYCJemADaqCbUAxltMoHKata7hmB5PjEXeu0kfg=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.0/go.mod h1:6oXGy4GLpypD3uCh8wcqztigGgmhLToMfjavgh+VySg=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24 h1:wj5Rwc05hvUSvKuOF29IYb9QrCLjU+rHAy/x/o0DK2c=
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.24/go.mod h1:jULHjqqjDlbyTa7pfM7WICATnOv+iOhjletM3N0Xbu8=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.5.0 h1:lPLbw4Gn59uoKqvOfSnkJr54XWk5Ak1NK20ZEiSWb3U=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.5.0/go.mod h1:80NaCIH9YU3rzTTs/J/ECATjXuRqzo/wB6ukO6MZ0XY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.5.0/go.mod h1:Mq6AEc+oEjCUlBuLiK5YwW4shSOAKCQ3tXN0sQeYoBA=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.14 h1:ZSIPAkAsCCjYrhqfw2+lNzWDzxzHXEckFkTePL5RSWQ=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.14/go.mod h1:AyGgqiKv9ECM6IZeNQtdT8NnMvUb3/2wokeq2Fgryto=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.9 h1:Lh1AShsuIJTwMkoxVCAYPJgNG5H+eN6SmoUn8nOZ5wE=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.9/go.mod h1:a9j48l6yL5XINLHLcOKInjdvknN+vWqPBxqeIDw7ktw=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.18 h1:BBYoNQt2kUZUUK4bIPsKrCcjVPUMNsgQpNAwhznK/zo=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.18/go.mod h1:NS55eQ4YixUJPTC+INxi2/jCqe1y2Uw3rnh9wEOVJxY=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17 h1:Jrd/oMh0PKQc6+BowB+pLEwLIgaQF29eYbe7E1Av9Ug=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17/go.mod h1:4nYOrY41Lrbk2170/BGkcJKBhws9Pfn8MG3aGqjjeFI=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.9.0 h1:0BOlTqnNnrEO04oYKzDxMMe68t107pmIotn18HtVonY=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.9.0/go.mod h1:xKCZ4YFSF2s4Hnb/J0TLeOsKuGzICzcElaOKNGrVnx4=
github.com/aws/aws-sdk-go-v2/service/s3 v1.19.0 h1:5mRAms4TjSTOGYsqKYte5kHr1PzpMJSyLThjF3J+hw0=
github.com/aws/aws-sdk-go-v2/service/s3 v1.19.0/go.mod h1:Gwz3aVctJe6mUY9T//bcALArPUaFmNAy2rTB9qN4No8=
github.com/aws/aws-sdk-go-v2/service/sso v1.6.0/go.mod h1:Q/l0ON1annSU+mc0JybDy1Gy6dnJxIcWjphO6qJPzvM=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.17 h1:HfVVR1vItaG6le+Bpw6P4midjBDMKnjMyZnw9MXYUcE=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.17/go.mod h1:YqMdV+gEKCQ59NrB7rzrJdALeBIsYiVi8Inj3+KcqHI=
github.com/aws/aws-sdk-go-v2/service/s3 v1.27.11 h1:3/gm/JTX9bX8CpzTgIlrtYpB3EVBDxyg/GY/QdcIEZw=
github.com/aws/aws-sdk-go-v2/service/s3 v1.27.11/go.mod h1:fmgDANqTUCxciViKl9hb/zD5LFbvPINFRgWhDbR+vZo=
github.com/aws/aws-sdk-go-v2/service/sso v1.11.23 h1:pwvCchFUEnlceKIgPUouBJwK81aCkQ8UDMORfeFtW10=
github.com/aws/aws-sdk-go-v2/service/sso v1.11.23/go.mod h1:/w0eg9IhFGjGyyncHIQrXtU8wvNsTJOP0R6PPj0wf80=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.5 h1:GUnZ62TevLqIoDyHeiWj2P7EqaosgakBKVvWriIdLQY=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.5/go.mod h1:csZuQY65DAdFBt1oIjO5hhBR49kQqop4+lcuCjf2arA=
github.com/aws/aws-sdk-go-v2/service/sts v1.10.0/go.mod h1:jLKCFqS+1T4i7HDqCP9GM4Uk75YW1cS0o82LdxpMyOE=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.6 h1:OwhhKc1P9ElfWbMKPIbMMZBV6hzJlL2JKD76wNNVzgQ=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.6/go.mod h1:csZuQY65DAdFBt1oIjO5hhBR49kQqop4+lcuCjf2arA=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.19 h1:9pPi0PsFNAGILFfPCk8Y0iyEBGc6lu6OQ97U7hmdesg=
github.com/aws/aws-sdk-go-v2/service/sts v1.16.19/go.mod h1:h4J3oPZQbxLhzGnk+j9dfYHi5qIOVJ5kczZd658/ydM=
github.com/aws/smithy-go v1.9.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
github.com/aws/smithy-go v1.13.3 h1:l7LYxGuzK6/K+NzJ2mC+VvLUbae0sL3bXU//04MkmnA=
github.com/aws/smithy-go v1.13.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -511,8 +504,9 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/enterprise-certificate-proxy v0.1.0 h1:zO8WHNx/MYiAKJ3d5spxZXZE6KHmIQGQcAzwUzV7qQw=
github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/enterprise-certificate-proxy v0.2.0 h1:y8Yozv7SZtlU//QXbezB6QkpuE6jMD2/gfzk4AftXjs=
github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
@@ -877,8 +871,8 @@ github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMW
github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli/v2 v2.16.3 h1:gHoFIwpPjoyIMbJp/VFd+/vuD0dAgFK4B6DpEMFJfQk=
github.com/urfave/cli/v2 v2.16.3/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
github.com/urfave/cli/v2 v2.17.1 h1:UzjDEw2dJQUE3iRaiNQ1VrVFbyAtKGH3VdkMoHA58V0=
github.com/urfave/cli/v2 v2.17.1/go.mod h1:1CNUng3PtjQMtRzJO4FMXBQvkGtuYRxxiR9xMa7jMwI=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
@@ -1057,8 +1051,8 @@ golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.0.0-20220923203811-8be639271d50 h1:vKyz8L3zkd+xrMeIaBsQ/MNVPVFSffdaU3ZyYlBGFnI=
golang.org/x/net v0.0.0-20220923203811-8be639271d50/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.0.0-20220930213112-107f3e3c3b0b h1:uKO3Js8lXGjpjdc4J3rqs0/Ex5yDKUGfk43tTYWVLas=
golang.org/x/net v0.0.0-20220930213112-107f3e3c3b0b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1095,8 +1089,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7 h1:ZrnxWX62AgTKOSagEqxvb3ffipvEDX2pl7E1TdqLqIc=
golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0 h1:cu5kTvlzcw1Q5S9f5ip1/cpiB4nXvw1XYzFPGgzLUOY=
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1192,8 +1186,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 h1:h+EGohizhe9XlX18rfpa8k8RAc5XyaeamM+0VHRd4lc=
golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec h1:BkDtF2Ih9xZ7le9ndzTA7KJow28VbQW3odyk/8drmuI=
golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1340,8 +1334,8 @@ google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69
google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
google.golang.org/api v0.97.0 h1:x/vEL1XDF/2V4xzdNgFPaKHluRESo2aTsL7QzHnBtGQ=
google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
google.golang.org/api v0.98.0 h1:yxZrcxXESimy6r6mdL5Q6EnZwmewDJK2dVg3g75s5Dg=
google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1433,8 +1427,8 @@ google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP
google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220923205249-dd2d53f1fffc h1:saaNe2+SBQxandnzcD/qB1JEBQ2Pqew+KlFLLdA/XcM=
google.golang.org/genproto v0.0.0-20220923205249-dd2d53f1fffc/go.mod h1:yEEpwVWKMZZzo81NwRgyEJnA2fQvpXAYPVisv8EgDVs=
google.golang.org/genproto v0.0.0-20220930163606-c98284e70a91 h1:Ezh2cpcnP5Rq60sLensUsFnxh7P6513NLvNtCm9iyJ4=
google.golang.org/genproto v0.0.0-20220930163606-c98284e70a91/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=


@@ -1,5 +1,12 @@
# Changes
## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.4.0...iam/v0.5.0) (2022-09-28)
### Features
* **iam:** remove ListApplicablePolicies ([52dddd1](https://github.com/googleapis/google-cloud-go/commit/52dddd1ed89fbe77e1859311c3b993a77a82bfc7))
## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.3.0...iam/v0.4.0) (2022-09-06)


@@ -1,3 +1,51 @@
# v1.4.8 (2022-09-14)
* No change notes available for this release.
# v1.4.7 (2022-09-02)
* No change notes available for this release.
# v1.4.6 (2022-08-31)
* No change notes available for this release.
# v1.4.5 (2022-08-29)
* No change notes available for this release.
# v1.4.4 (2022-08-09)
* No change notes available for this release.
# v1.4.3 (2022-06-29)
* No change notes available for this release.
# v1.4.2 (2022-06-07)
* No change notes available for this release.
# v1.4.1 (2022-03-24)
* No change notes available for this release.
# v1.4.0 (2022-03-08)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.3.0 (2022-02-24)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.2.0 (2022-01-14)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.1.0 (2022-01-07)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.0.0 (2021-11-06)
* **Announcement**: Support has been added for AWS EventStream APIs for Kinesis, S3, and Transcribe Streaming. Support for the Lex Runtime V2 EventStream API will be added in a future release.


@@ -3,4 +3,4 @@
package eventstream
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.0.0"
const goModuleVersion = "1.4.8"


@@ -1,3 +1,7 @@
# v1.17.8 (2022-09-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.17.7 (2022-09-20)
* **Dependency Update**: Updated to the latest SDK module versions


@@ -3,4 +3,4 @@
package config
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.17.7"
const goModuleVersion = "1.17.8"


@@ -1,3 +1,7 @@
# v1.12.21 (2022-09-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.20 (2022-09-20)
* **Dependency Update**: Updated to the latest SDK module versions


@@ -3,4 +3,4 @@
package credentials
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.12.20"
const goModuleVersion = "1.12.21"


@@ -1,3 +1,179 @@
# v1.11.34 (2022-09-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.33 (2022-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.32 (2022-09-14)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.31 (2022-09-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.30 (2022-08-31)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.29 (2022-08-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.28 (2022-08-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.27 (2022-08-15)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.26 (2022-08-14)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.25 (2022-08-11)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.24 (2022-08-10)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.23 (2022-08-09)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.22 (2022-08-08)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.21 (2022-08-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.20 (2022-07-11)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.19 (2022-07-05)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.18 (2022-07-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.17 (2022-06-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.16 (2022-06-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.15 (2022-06-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.14 (2022-05-26)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.13 (2022-05-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.12 (2022-05-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.11 (2022-05-16)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.10 (2022-05-09)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.9 (2022-05-06)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.8 (2022-05-03)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.7 (2022-04-27)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.6 (2022-04-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.5 (2022-04-12)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.4 (2022-04-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.3 (2022-03-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.2 (2022-03-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.1 (2022-03-23)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.0 (2022-03-08)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.10.0 (2022-02-24)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.1 (2022-01-28)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.0 (2022-01-14)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.8.0 (2022-01-07)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.7.5 (2021-12-21)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.7.4 (2021-12-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.7.3 (2021-11-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.7.2 (2021-11-19)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.7.1 (2021-11-12)
* **Dependency Update**: Updated to the latest SDK module versions


@@ -100,7 +100,8 @@ func WithDownloaderClientOptions(opts ...func(*s3.Options)) func(*Downloader) {
// interface.
//
// Example:
// // Load AWS Config
//
// // Load AWS Config
// cfg, err := config.LoadDefaultConfig(context.TODO())
// if err != nil {
// panic(err)
@@ -153,6 +154,7 @@ func NewDownloader(c DownloadAPIClient, options ...func(*Downloader)) *Downloade
// and GC runs.
//
// Example:
//
// // pre-allocate in memory buffer, where headObject type is *s3.HeadObjectOutput
// buf := make([]byte, int(headObject.ContentLength))
// // wrap with aws.WriteAtBuffer
@@ -348,16 +350,16 @@ func (d *downloader) downloadRange(rng string) {
// downloadChunk downloads the chunk from s3
func (d *downloader) downloadChunk(chunk dlchunk) error {
in := &s3.GetObjectInput{}
awsutil.Copy(in, d.in)
var params s3.GetObjectInput
awsutil.Copy(&params, d.in)
// Get the next byte range of data
in.Range = aws.String(chunk.ByteRange())
params.Range = aws.String(chunk.ByteRange())
var n int64
var err error
for retry := 0; retry <= d.partBodyMaxRetries; retry++ {
n, err = d.tryDownloadChunk(in, &chunk)
n, err = d.tryDownloadChunk(&params, &chunk)
if err == nil {
break
}
@@ -374,8 +376,9 @@ func (d *downloader) downloadChunk(chunk dlchunk) error {
chunk.cur = 0
d.cfg.Logger.Logf(logging.Debug, "object part body download interrupted %s, err, %v, retrying attempt %d",
aws.ToString(in.Key), err, retry)
d.cfg.Logger.Logf(logging.Debug,
"object part body download interrupted %s, err, %v, retrying attempt %d",
aws.ToString(params.Key), err, retry)
}
d.incrWritten(n)
@@ -383,20 +386,24 @@ func (d *downloader) downloadChunk(chunk dlchunk) error {
return err
}
func (d *downloader) tryDownloadChunk(in *s3.GetObjectInput, w io.Writer) (int64, error) {
func (d *downloader) tryDownloadChunk(params *s3.GetObjectInput, w io.Writer) (int64, error) {
cleanup := func() {}
if d.cfg.BufferProvider != nil {
w, cleanup = d.cfg.BufferProvider.GetReadFrom(w)
}
defer cleanup()
resp, err := d.cfg.S3.GetObject(d.ctx, in, d.cfg.ClientOptions...)
resp, err := d.cfg.S3.GetObject(d.ctx, params, d.cfg.ClientOptions...)
if err != nil {
return 0, err
}
d.setTotalBytes(resp) // Set total if not yet set.
n, err := io.Copy(w, resp.Body)
var src io.Reader = resp.Body
if d.cfg.BufferProvider != nil {
src = &suppressWriterAt{suppressed: src}
}
n, err := io.Copy(w, src)
resp.Body.Close()
if err != nil {
return n, &errReadingBody{err: err}


@@ -3,4 +3,4 @@
package manager
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.7.1"
const goModuleVersion = "1.11.34"


@@ -4,12 +4,13 @@ import (
"bytes"
"context"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
"io"
"net/http"
"sort"
"sync"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/internal/awsutil"
"github.com/aws/aws-sdk-go-v2/service/s3"
@@ -49,7 +50,6 @@ const DefaultUploadConcurrency = 5
// fmt.Printf("upload failure, %s\n", err.Error())
// }
// }
//
type MultiUploadFailure interface {
error
@@ -71,7 +71,7 @@ type multiUploadError struct {
// batchItemError returns the string representation of the error.
//
// See apierr.BaseError ErrorWithExtra for output format
// # See apierr.BaseError ErrorWithExtra for output format
//
// Satisfies the error interface.
func (m *multiUploadError) Error() string {
@@ -97,14 +97,62 @@ type UploadOutput struct {
// The URL where the object was uploaded to.
Location string
// The ID for a multipart upload to S3. In the case of an error the error
// can be cast to the MultiUploadFailure interface to extract the upload ID.
// Will be empty string if multipart upload was not used, and the object
// was uploaded as a single PutObject call.
UploadID string
// The list of parts that were uploaded and their checksums. Will be empty
// if multipart upload was not used, and the object was uploaded as a
// single PutObject call.
CompletedParts []types.CompletedPart
// Indicates whether the uploaded object uses an S3 Bucket Key for server-side
// encryption with Amazon Web Services KMS (SSE-KMS).
BucketKeyEnabled bool
// The base64-encoded, 32-bit CRC32 checksum of the object.
ChecksumCRC32 *string
// The base64-encoded, 32-bit CRC32C checksum of the object.
ChecksumCRC32C *string
// The base64-encoded, 160-bit SHA-1 digest of the object.
ChecksumSHA1 *string
// The base64-encoded, 256-bit SHA-256 digest of the object.
ChecksumSHA256 *string
// Entity tag for the uploaded object.
ETag *string
// If the object expiration is configured, this will contain the expiration date
// (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
Expiration *string
// The object key of the newly created object.
Key *string
// If present, indicates that the requester was successfully charged for the
// request.
RequestCharged types.RequestCharged
// If present, specifies the ID of the Amazon Web Services Key Management Service
// (Amazon Web Services KMS) symmetric customer managed customer master key (CMK)
// that was used for the object.
SSEKMSKeyId *string
// If you specified server-side encryption either with an Amazon S3-managed
// encryption key or an Amazon Web Services KMS customer master key (CMK) in your
// initiate multipart upload request, the response includes this header. It
// confirms the encryption algorithm that Amazon S3 used to encrypt the object.
ServerSideEncryption types.ServerSideEncryption
// The version of the object that was uploaded. Will only be populated if
// the S3 Bucket is versioned. If the bucket is not versioned this field
// will not be set.
VersionID *string
// The ID for a multipart upload to S3. In the case of an error the error
// can be cast to the MultiUploadFailure interface to extract the upload ID.
UploadID string
}
// WithUploaderRequestOptions appends to the Uploader's API client options.
@@ -117,6 +165,35 @@ func WithUploaderRequestOptions(opts ...func(*s3.Options)) func(*Uploader) {
// The Uploader structure that calls Upload(). It is safe to call Upload()
// on this structure for multiple objects and across concurrent goroutines.
// Mutating the Uploader's properties is not safe to be done concurrently.
//
// # Pre-computed Checksums
//
// Care must be taken when using pre-computed checksums with the transfer upload
// manager. The format and value of the checksum differ based on whether the
// upload is performed as a single or multipart upload.
//
// Uploads that are smaller than the Uploader's PartSize will be uploaded using
// the PutObject API operation. Pre-computed checksums of the uploaded object's
// content are valid for these single part uploads. If the checksum provided
// does not match the uploaded content, the upload will fail.
//
// Uploads that are larger than the Uploader's PartSize will be uploaded using
// multipart upload. The pre-computed checksums for these uploads are a
// checksum of the checksums of each part, not a checksum of the full uploaded
// bytes, and use the format "<checksum of checksum>-<numberParts>" (e.g.
// "DUoRhQ==-3"). If a pre-computed checksum is provided that does not match
// this format, or does not match the uploaded content, the upload will fail.
//
// ContentMD5 is explicitly ignored for multipart uploads, and its value is
// suppressed.
//
// # Automatically Computed Checksums
//
// When the ChecksumAlgorithm member of Upload's input parameter PutObjectInput
// is set to a valid value, the SDK will automatically compute the checksum of
// the individual uploaded parts. The UploadOutput result from Upload will
// include the checksum of part checksums provided by the S3
// CompleteMultipartUpload API call.
type Uploader struct {
// The buffer size (in bytes) to use when buffering data into chunks and
// sending them as parts to S3. The minimum allowed part size is 5MB, and
@@ -172,6 +249,7 @@ type Uploader struct {
// satisfies the client.ConfigProvider interface.
//
// Example:
//
// // Load AWS Config
// cfg, err := config.LoadDefaultConfig(context.TODO())
// if err != nil {
@@ -220,13 +298,17 @@ func NewUploader(client UploadAPIClient, options ...func(*Uploader)) *Uploader {
// options that will be applied to all API operations made with this uploader.
//
// It is safe to call this method concurrently across goroutines.
func (u Uploader) Upload(ctx context.Context, input *s3.PutObjectInput, opts ...func(*Uploader)) (*UploadOutput, error) {
func (u Uploader) Upload(ctx context.Context, input *s3.PutObjectInput, opts ...func(*Uploader)) (
*UploadOutput, error,
) {
i := uploader{in: input, cfg: u, ctx: ctx}
// Copy ClientOptions
clientOptions := make([]func(*s3.Options), 0, len(i.cfg.ClientOptions)+1)
clientOptions = append(clientOptions, func(o *s3.Options) {
o.APIOptions = append(o.APIOptions, middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey))
o.APIOptions = append(o.APIOptions,
middleware.AddSDKAgentKey(middleware.FeatureMetadata, userAgentKey),
)
})
clientOptions = append(clientOptions, i.cfg.ClientOptions...)
i.cfg.ClientOptions = clientOptions
@@ -402,22 +484,35 @@ func readFillBuf(r io.Reader, b []byte) (offset int, err error) {
func (u *uploader) singlePart(r io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
defer cleanup()
params := &s3.PutObjectInput{}
awsutil.Copy(params, u.in)
var params s3.PutObjectInput
awsutil.Copy(&params, u.in)
params.Body = r
// Need to use request form because URL generated in request is
// used in return.
var locationRecorder recordLocationClient
out, err := u.cfg.S3.PutObject(u.ctx, params, append(u.cfg.ClientOptions, locationRecorder.WrapClient())...)
out, err := u.cfg.S3.PutObject(u.ctx, &params,
append(u.cfg.ClientOptions, locationRecorder.WrapClient())...)
if err != nil {
return nil, err
}
return &UploadOutput{
Location: locationRecorder.location,
VersionID: out.VersionId,
Location: locationRecorder.location,
BucketKeyEnabled: out.BucketKeyEnabled,
ChecksumCRC32: out.ChecksumCRC32,
ChecksumCRC32C: out.ChecksumCRC32C,
ChecksumSHA1: out.ChecksumSHA1,
ChecksumSHA256: out.ChecksumSHA256,
ETag: out.ETag,
Expiration: out.Expiration,
Key: params.Key,
RequestCharged: out.RequestCharged,
SSEKMSKeyId: out.SSEKMSKeyId,
ServerSideEncryption: out.ServerSideEncryption,
VersionID: out.VersionId,
}, nil
}
@@ -480,12 +575,13 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
// upload will perform a multipart upload using the firstBuf buffer containing
// the first chunk of data.
func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadOutput, error) {
params := &s3.CreateMultipartUploadInput{}
awsutil.Copy(params, u.in)
var params s3.CreateMultipartUploadInput
awsutil.Copy(&params, u.in)
// Create the multipart
var locationRecorder recordLocationClient
resp, err := u.cfg.S3.CreateMultipartUpload(u.ctx, params, append(u.cfg.ClientOptions, locationRecorder.WrapClient())...)
resp, err := u.cfg.S3.CreateMultipartUpload(u.ctx, &params,
append(u.cfg.ClientOptions, locationRecorder.WrapClient())...)
if err != nil {
cleanup()
return nil, err
@@ -529,7 +625,7 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadO
// Close the channel, wait for workers, and complete upload
close(ch)
u.wg.Wait()
complete := u.complete()
completeOut := u.complete()
if err := u.geterr(); err != nil {
return nil, &multiUploadError{
@@ -539,9 +635,22 @@ func (u *multiuploader) upload(firstBuf io.ReadSeeker, cleanup func()) (*UploadO
}
return &UploadOutput{
Location: locationRecorder.location,
VersionID: complete.VersionId,
UploadID: u.uploadID,
Location: locationRecorder.location,
UploadID: u.uploadID,
CompletedParts: u.parts,
BucketKeyEnabled: completeOut.BucketKeyEnabled,
ChecksumCRC32: completeOut.ChecksumCRC32,
ChecksumCRC32C: completeOut.ChecksumCRC32C,
ChecksumSHA1: completeOut.ChecksumSHA1,
ChecksumSHA256: completeOut.ChecksumSHA256,
ETag: completeOut.ETag,
Expiration: completeOut.Expiration,
Key: completeOut.Key,
RequestCharged: completeOut.RequestCharged,
SSEKMSKeyId: completeOut.SSEKMSKeyId,
ServerSideEncryption: completeOut.ServerSideEncryption,
VersionID: completeOut.VersionId,
}, nil
}
@@ -602,19 +711,30 @@ func (u *multiuploader) send(c chunk) error {
Bucket: u.in.Bucket,
Key: u.in.Key,
Body: c.buf,
UploadId: &u.uploadID,
SSECustomerAlgorithm: u.in.SSECustomerAlgorithm,
SSECustomerKey: u.in.SSECustomerKey,
PartNumber: c.num,
SSECustomerKeyMD5: u.in.SSECustomerKeyMD5,
ExpectedBucketOwner: u.in.ExpectedBucketOwner,
RequestPayer: u.in.RequestPayer,
ChecksumAlgorithm: u.in.ChecksumAlgorithm,
// Invalid to set any of the individual ChecksumXXX members from
// PutObject as they are never valid for individual parts of a
// multipart upload.
PartNumber: c.num,
UploadId: &u.uploadID,
}
// TODO should do copy then clear?
resp, err := u.cfg.S3.UploadPart(u.ctx, params, u.cfg.ClientOptions...)
if err != nil {
return err
}
n := c.num
completed := types.CompletedPart{ETag: resp.ETag, PartNumber: n}
var completed types.CompletedPart
awsutil.Copy(&completed, resp)
completed.PartNumber = c.num
u.m.Lock()
u.parts = append(u.parts, completed)
@@ -668,13 +788,12 @@ func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput {
// Parts must be sorted in PartNumber order.
sort.Sort(u.parts)
params := &s3.CompleteMultipartUploadInput{
Bucket: u.in.Bucket,
Key: u.in.Key,
UploadId: &u.uploadID,
MultipartUpload: &types.CompletedMultipartUpload{Parts: u.parts},
}
resp, err := u.cfg.S3.CompleteMultipartUpload(u.ctx, params, u.cfg.ClientOptions...)
var params s3.CompleteMultipartUploadInput
awsutil.Copy(&params, u.in)
params.UploadId = &u.uploadID
params.MultipartUpload = &types.CompletedMultipartUpload{Parts: u.parts}
resp, err := u.cfg.S3.CompleteMultipartUpload(u.ctx, &params, u.cfg.ClientOptions...)
if err != nil {
u.seterr(err)
u.fail()
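
The checksum documentation and the new UploadOutput fields above can be exercised roughly as follows. This is a sketch, not code from this repository: the bucket, key, and body are placeholders, and the error branch assumes the MultiUploadFailure interface described earlier exposes an UploadID() accessor.

package main

import (
    "context"
    "errors"
    "log"
    "strings"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/feature/s3/manager"
    "github.com/aws/aws-sdk-go-v2/service/s3"
    "github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    uploader := manager.NewUploader(s3.NewFromConfig(cfg))

    out, err := uploader.Upload(context.TODO(), &s3.PutObjectInput{
        Bucket: aws.String("example-bucket"), // placeholder
        Key:    aws.String("example-key"),    // placeholder
        Body:   strings.NewReader("hello"),
        // Let the SDK compute per-part checksums; for multipart uploads the
        // result carries the checksum-of-checksums returned by S3.
        ChecksumAlgorithm: types.ChecksumAlgorithmCrc32,
    })
    if err != nil {
        var mu manager.MultiUploadFailure
        if errors.As(err, &mu) {
            // The upload ID identifies the failed multipart upload for cleanup.
            log.Fatalf("upload failed, upload id %q: %v", mu.UploadID(), err)
        }
        log.Fatal(err)
    }
    log.Printf("uploaded to %s (upload id %q, crc32 %s, etag %s)",
        out.Location, out.UploadID, aws.ToString(out.ChecksumCRC32), aws.ToString(out.ETag))
}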


@@ -73,3 +73,11 @@ func (p *PooledBufferedReadFromProvider) GetReadFrom(writer io.Writer) (r Writer
}
return r, cleanup
}
type suppressWriterAt struct {
suppressed io.Reader
}
func (s *suppressWriterAt) Read(p []byte) (n int, err error) {
return s.suppressed.Read(p)
}
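
suppressWriterAt only forwards Read, which hides any optional interfaces (io.WriterTo, io.ReaderAt, and so on) the wrapped response body might otherwise expose. Reading the tryDownloadChunk change above, the apparent intent is that when a BufferProvider is configured, io.Copy cannot take a source-side fast path and instead streams through the pooled ReadFrom wrapper; that reading is inferred from the code, not stated in upstream notes. The standalone stdlib sketch below shows the mechanism; readOnly is a hypothetical stand-in for the wrapper.

package main

import (
    "bytes"
    "fmt"
    "io"
    "strings"
)

// readOnly exposes nothing but Read, so type assertions for optional
// interfaces on the underlying reader fail.
type readOnly struct{ r io.Reader }

func (ro *readOnly) Read(p []byte) (int, error) { return ro.r.Read(p) }

func main() {
    src := strings.NewReader("payload") // *strings.Reader implements io.WriterTo

    _, direct := interface{}(src).(io.WriterTo)
    _, wrapped := interface{}(&readOnly{r: src}).(io.WriterTo)
    fmt.Println(direct, wrapped) // true false

    // With the wrapper, io.Copy falls back to the destination's ReadFrom
    // (or a plain read/write loop) instead of calling src.WriteTo.
    var dst bytes.Buffer
    if _, err := io.Copy(&dst, &readOnly{r: src}); err != nil {
        fmt.Println("copy error:", err)
    }
    fmt.Println(dst.String()) // payload
}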


@@ -0,0 +1,61 @@
# v1.0.14 (2022-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.0.13 (2022-09-14)
* **Bug Fix**: Fixes an issue where an error from an underlying SigV4 credential provider would not be surfaced from the SigV4a credential provider. Contribution by [sakthipriyan-aqfer](https://github.com/sakthipriyan-aqfer).
* **Dependency Update**: Updated to the latest SDK module versions
# v1.0.12 (2022-09-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.0.11 (2022-08-31)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.0.10 (2022-08-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.0.9 (2022-08-11)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.0.8 (2022-08-09)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.0.7 (2022-08-08)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.0.6 (2022-08-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.0.5 (2022-07-05)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.0.4 (2022-06-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.0.3 (2022-06-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.0.2 (2022-05-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.0.1 (2022-04-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.0.0 (2022-04-07)
* **Release**: New internal v4a signing module location.


@@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -51,7 +51,7 @@ type SymmetricCredentialAdaptor struct {
func (s *SymmetricCredentialAdaptor) Retrieve(ctx context.Context) (aws.Credentials, error) {
symCreds, err := s.retrieveFromSymmetricProvider(ctx)
if err != nil {
return aws.Credentials{}, nil
return aws.Credentials{}, err
}
if asymCreds := s.getCreds(); asymCreds == nil {


@@ -0,0 +1,6 @@
// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
package v4a
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.0.14"


@@ -5,10 +5,10 @@ import "fmt"
// ConstantTimeByteCompare is a constant-time byte comparison of x and y. This function performs an absolute comparison
// of the two byte slices, assuming they represent a big-endian number.
//
// error if len(x) != len(y)
// -1 if x < y
// 0 if x == y
// +1 if x > y
// error if len(x) != len(y)
// -1 if x < y
// 0 if x == y
// +1 if x > y
func ConstantTimeByteCompare(x, y []byte) (int, error) {
if len(x) != len(y) {
return 0, fmt.Errorf("slice lengths do not match")
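
A test-shaped sketch of the contract documented above. The crypto package here is internal to the SDK, so this is illustrative only and assumes the package name matches its directory; the byte values are arbitrary.

package crypto

import "testing"

func TestConstantTimeByteCompareContract(t *testing.T) {
    // Equal-length slices compare like big-endian numbers: 0x0102 < 0x0103.
    if n, err := ConstantTimeByteCompare([]byte{0x01, 0x02}, []byte{0x01, 0x03}); err != nil || n != -1 {
        t.Fatalf("want -1, <nil>; got %d, %v", n, err)
    }
    if n, err := ConstantTimeByteCompare([]byte{0x05}, []byte{0x05}); err != nil || n != 0 {
        t.Fatalf("want 0, <nil>; got %d, %v", n, err)
    }
    // Mismatched lengths are reported as an error, not as a comparison result.
    if _, err := ConstantTimeByteCompare([]byte{0x01}, []byte{0x01, 0x02}); err == nil {
        t.Fatal("want error for mismatched slice lengths")
    }
}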


@@ -1,5 +1,3 @@
// TODO(GOSDK-1220): This signer has removed the conceptual knowledge of UNSIGNED-PAYLOAD and X-Amz-Content-Sha256
package v4a
import (
@@ -22,8 +20,8 @@ import (
"strings"
"time"
signerCrypto "github.com/aws/aws-sdk-go-v2/service/s3/internal/v4a/internal/crypto"
v4Internal "github.com/aws/aws-sdk-go-v2/service/s3/internal/v4a/internal/v4"
signerCrypto "github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto"
v4Internal "github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4"
"github.com/aws/smithy-go/encoding/httpbinding"
"github.com/aws/smithy-go/logging"
)
@@ -440,7 +438,15 @@ func (s *httpSigner) buildCanonicalHeaders(host string, rule v4Internal.Rule, he
} else {
canonicalHeaders.WriteString(headers[i])
canonicalHeaders.WriteRune(colon)
canonicalHeaders.WriteString(strings.Join(signed[headers[i]], ","))
// Trim out leading, trailing, and dedup inner spaces from signed header values.
values := signed[headers[i]]
for j, v := range values {
cleanedValue := strings.TrimSpace(v4Internal.StripExcessSpaces(v))
canonicalHeaders.WriteString(cleanedValue)
if j < len(values)-1 {
canonicalHeaders.WriteRune(',')
}
}
}
canonicalHeaders.WriteRune('\n')
}
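
The loop above normalizes each signed header value before it is written into the canonical request, where the previous code joined the raw values. StripExcessSpaces is internal to the signer, so the stdlib-only sketch below shows just the trim-and-join part of that normalization; the header values are made up.

package main

import (
    "fmt"
    "strings"
)

func main() {
    values := []string{"  text/plain ", " charset=utf-8  "}

    // Previous behavior: raw values joined as-is.
    fmt.Printf("%q\n", strings.Join(values, ","))

    // New behavior: each value is trimmed before joining, so stray whitespace
    // no longer changes the canonical header string that gets signed.
    trimmed := make([]string, len(values))
    for i, v := range values {
        trimmed[i] = strings.TrimSpace(v)
    }
    fmt.Printf("%q\n", strings.Join(trimmed, ",")) // "text/plain,charset=utf-8"
}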


@@ -1,3 +1,55 @@
# v1.9.9 (2022-09-14)
* No change notes available for this release.
# v1.9.8 (2022-09-02)
* No change notes available for this release.
# v1.9.7 (2022-08-31)
* No change notes available for this release.
# v1.9.6 (2022-08-29)
* No change notes available for this release.
# v1.9.5 (2022-08-11)
* No change notes available for this release.
# v1.9.4 (2022-08-09)
* No change notes available for this release.
# v1.9.3 (2022-06-29)
* No change notes available for this release.
# v1.9.2 (2022-06-07)
* No change notes available for this release.
# v1.9.1 (2022-03-24)
* No change notes available for this release.
# v1.9.0 (2022-03-08)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.8.0 (2022-02-24)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.7.0 (2022-01-14)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.6.0 (2022-01-07)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
# v1.5.0 (2021-11-06)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version


@@ -1,7 +1,7 @@
/*
Package acceptencoding provides customizations associated with Accept Encoding Header.
Accept encoding gzip
# Accept encoding gzip
The Go HTTP client automatically supports accept-encoding and content-encoding
gzip by default. This default behavior is not desired by the SDK, and prevents
@@ -18,6 +18,5 @@ client's default behavior.
An `EnableAcceptEncodingGzip` option may or may not be present, depending on the client using
the middleware below. If present, the option can be used to enable automatic gzip
decompression by the SDK.
*/
package acceptencoding
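For context on the default behavior this package documentation describes, the standard library exposes the knob directly: Go's http.Transport transparently requests and decodes gzip unless DisableCompression is set. The snippet below only illustrates that standard-library default; it is not how the SDK's middleware implements the customization:

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// With DisableCompression unset (the default), the transport adds
	// Accept-Encoding: gzip and transparently decodes gzip responses.
	// Setting it to true turns that off, so the caller controls the header
	// and the decoding itself.
	client := &http.Client{
		Transport: &http.Transport{DisableCompression: true},
	}
	fmt.Println(client.Transport.(*http.Transport).DisableCompression) // true
}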

View file

@ -3,4 +3,4 @@
package acceptencoding
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.5.0"
const goModuleVersion = "1.9.9"

View file

@ -0,0 +1,84 @@
# v1.1.18 (2022-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.17 (2022-09-14)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.16 (2022-09-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.15 (2022-08-31)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.14 (2022-08-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.13 (2022-08-11)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.12 (2022-08-09)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.11 (2022-08-08)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.10 (2022-08-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.9 (2022-07-05)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.8 (2022-06-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.7 (2022-06-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.6 (2022-05-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.5 (2022-04-27)
* **Bug Fix**: Fixes a bug that could cause the SigV4 payload hash to be incorrectly encoded, leading to signing errors.
# v1.1.4 (2022-04-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.3 (2022-03-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.2 (2022-03-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.1 (2022-03-23)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.1.0 (2022-03-08)
* **Feature**: Updates the SDK's checksum validation logic to require opt-in to output response payload validation. The SDK was always performing output response payload checksum validation, not respecting the output validation model option. Fixes [#1606](https://github.com/aws/aws-sdk-go-v2/issues/1606)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.0.0 (2022-02-24)
* **Release**: New module for computing checksums
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions

View file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -0,0 +1,323 @@
package checksum
import (
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"encoding/base64"
"encoding/hex"
"fmt"
"hash"
"hash/crc32"
"io"
"strings"
"sync"
)
// Algorithm represents the checksum algorithms supported
type Algorithm string
// Enumeration values for supported checksum Algorithms.
const (
// AlgorithmCRC32C represents CRC32C hash algorithm
AlgorithmCRC32C Algorithm = "CRC32C"
// AlgorithmCRC32 represents CRC32 hash algorithm
AlgorithmCRC32 Algorithm = "CRC32"
// AlgorithmSHA1 represents SHA1 hash algorithm
AlgorithmSHA1 Algorithm = "SHA1"
// AlgorithmSHA256 represents SHA256 hash algorithm
AlgorithmSHA256 Algorithm = "SHA256"
)
var supportedAlgorithms = []Algorithm{
AlgorithmCRC32C,
AlgorithmCRC32,
AlgorithmSHA1,
AlgorithmSHA256,
}
func (a Algorithm) String() string { return string(a) }
// ParseAlgorithm attempts to parse the provided value into a checksum
// algorithm, matching without case. Returns the algorithm matched, or an error
// if the algorithm wasn't matched.
func ParseAlgorithm(v string) (Algorithm, error) {
for _, a := range supportedAlgorithms {
if strings.EqualFold(string(a), v) {
return a, nil
}
}
return "", fmt.Errorf("unknown checksum algorithm, %v", v)
}
// FilterSupportedAlgorithms filters the set of algorithms, returning a slice
// of algorithms that are supported.
func FilterSupportedAlgorithms(vs []string) []Algorithm {
found := map[Algorithm]struct{}{}
supported := make([]Algorithm, 0, len(supportedAlgorithms))
for _, v := range vs {
for _, a := range supportedAlgorithms {
// Only consider algorithms that are supported
if !strings.EqualFold(v, string(a)) {
continue
}
// Ignore duplicate algorithms in list.
if _, ok := found[a]; ok {
continue
}
supported = append(supported, a)
found[a] = struct{}{}
}
}
return supported
}
// NewAlgorithmHash returns a hash.Hash for the checksum algorithm. Error is
// returned if the algorithm is unknown.
func NewAlgorithmHash(v Algorithm) (hash.Hash, error) {
switch v {
case AlgorithmSHA1:
return sha1.New(), nil
case AlgorithmSHA256:
return sha256.New(), nil
case AlgorithmCRC32:
return crc32.NewIEEE(), nil
case AlgorithmCRC32C:
return crc32.New(crc32.MakeTable(crc32.Castagnoli)), nil
default:
return nil, fmt.Errorf("unknown checksum algorithm, %v", v)
}
}
// AlgorithmChecksumLength returns the length of the algorithm's checksum in
// bytes. If the algorithm is not known, an error is returned.
func AlgorithmChecksumLength(v Algorithm) (int, error) {
switch v {
case AlgorithmSHA1:
return sha1.Size, nil
case AlgorithmSHA256:
return sha256.Size, nil
case AlgorithmCRC32:
return crc32.Size, nil
case AlgorithmCRC32C:
return crc32.Size, nil
default:
return 0, fmt.Errorf("unknown checksum algorithm, %v", v)
}
}
const awsChecksumHeaderPrefix = "x-amz-checksum-"
// AlgorithmHTTPHeader returns the HTTP header for the algorithm's hash.
func AlgorithmHTTPHeader(v Algorithm) string {
return awsChecksumHeaderPrefix + strings.ToLower(string(v))
}
// base64EncodeHashSum computes the base64 encoded checksum of a given running
// hash. The running hash must already have content written to it. Returns the
// byte slice of the base64 encoded checksum.
func base64EncodeHashSum(h hash.Hash) []byte {
sum := h.Sum(nil)
sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
base64.StdEncoding.Encode(sum64, sum)
return sum64
}
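Putting NewAlgorithmHash, AlgorithmHTTPHeader, and base64EncodeHashSum together: the header value is the base64 encoding of the raw hash sum. A minimal standalone sketch using only the standard library (the CRC32C digest's Sum appends the 4-byte big-endian checksum):

package main

import (
	"encoding/base64"
	"fmt"
	"hash/crc32"
)

func main() {
	// Same construction as NewAlgorithmHash(AlgorithmCRC32C).
	h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
	h.Write([]byte("Hello world"))
	// Sum(nil) yields the 4-byte big-endian CRC; base64 of that is the header value.
	fmt.Println("x-amz-checksum-crc32c:", base64.StdEncoding.EncodeToString(h.Sum(nil)))
}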
// hexEncodeHashSum computes the hex encoded checksum of a given running hash. The
// running hash must already have content written to it. Returns the byte slice
// of the hex encoded checksum.
func hexEncodeHashSum(h hash.Hash) []byte {
sum := h.Sum(nil)
sumHex := make([]byte, hex.EncodedLen(len(sum)))
hex.Encode(sumHex, sum)
return sumHex
}
// computeMD5Checksum computes base64 MD5 checksum of an io.Reader's contents.
// Returns the byte slice of MD5 checksum and an error.
func computeMD5Checksum(r io.Reader) ([]byte, error) {
h := md5.New()
// Copy errors may be assumed to be from the body.
if _, err := io.Copy(h, r); err != nil {
return nil, fmt.Errorf("failed compute MD5 hash of reader, %w", err)
}
// Encode the MD5 checksum in base64.
return base64EncodeHashSum(h), nil
}
// computeChecksumReader provides a reader wrapping an underlying io.Reader to
// compute the checksum of the stream's bytes.
type computeChecksumReader struct {
stream io.Reader
algorithm Algorithm
hasher hash.Hash
base64ChecksumLen int
mux sync.RWMutex
lockedChecksum string
lockedErr error
}
// newComputeChecksumReader returns a computeChecksumReader for the stream and
// algorithm specified. Returns error if unable to create the reader, or
// algorithm is unknown.
func newComputeChecksumReader(stream io.Reader, algorithm Algorithm) (*computeChecksumReader, error) {
hasher, err := NewAlgorithmHash(algorithm)
if err != nil {
return nil, err
}
checksumLength, err := AlgorithmChecksumLength(algorithm)
if err != nil {
return nil, err
}
return &computeChecksumReader{
stream: io.TeeReader(stream, hasher),
algorithm: algorithm,
hasher: hasher,
base64ChecksumLen: base64.StdEncoding.EncodedLen(checksumLength),
}, nil
}
// Read wraps the underlying reader. When the underlying reader returns EOF,
// the checksum of the reader will be computed, and can be retrieved with
// Base64Checksum.
func (r *computeChecksumReader) Read(p []byte) (int, error) {
n, err := r.stream.Read(p)
if err == nil {
return n, nil
} else if err != io.EOF {
r.mux.Lock()
defer r.mux.Unlock()
r.lockedErr = err
return n, err
}
b := base64EncodeHashSum(r.hasher)
r.mux.Lock()
defer r.mux.Unlock()
r.lockedChecksum = string(b)
return n, err
}
func (r *computeChecksumReader) Algorithm() Algorithm {
return r.algorithm
}
// Base64ChecksumLength returns the base64 encoded length of the checksum for
// the algorithm.
func (r *computeChecksumReader) Base64ChecksumLength() int {
return r.base64ChecksumLen
}
// Base64Checksum returns the base64 checksum for the algorithm, or error if
// the underlying reader returned a non-EOF error.
//
// Safe to be called concurrently, but will return an error until after the
// underlying reader returns EOF.
func (r *computeChecksumReader) Base64Checksum() (string, error) {
r.mux.RLock()
defer r.mux.RUnlock()
if r.lockedErr != nil {
return "", r.lockedErr
}
if r.lockedChecksum == "" {
return "", fmt.Errorf(
"checksum not available yet, called before reader returns EOF",
)
}
return r.lockedChecksum, nil
}
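The tee-then-hash pattern computeChecksumReader relies on can be shown with the standard library alone; the checksum only becomes available once the stream has been fully consumed. A minimal sketch (the printed value is the SHA256 of "Hello world", the same checksum quoted in the aws-chunked example later in this vendor update):

package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"io"
	"strings"
)

func main() {
	h := sha256.New()
	r := io.TeeReader(strings.NewReader("Hello world"), h)
	// Consume the stream; the hash fills in as a side effect, just like the
	// io.TeeReader used inside newComputeChecksumReader.
	if _, err := io.Copy(io.Discard, r); err != nil {
		panic(err)
	}
	fmt.Println(base64.StdEncoding.EncodeToString(h.Sum(nil)))
	// Output: ZOyIygCyaOW6GjVnihtTFtIS9PNmskdyMlNKiuyjfzw=
}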
// validateChecksumReader implements io.ReadCloser interface. The wrapper
// performs checksum validation when the underlying reader has been fully read.
type validateChecksumReader struct {
originalBody io.ReadCloser
body io.Reader
hasher hash.Hash
algorithm Algorithm
expectChecksum string
}
// newValidateChecksumReader returns a configured io.ReadCloser that performs
// checksum validation when the underlying reader has been fully read.
func newValidateChecksumReader(
body io.ReadCloser,
algorithm Algorithm,
expectChecksum string,
) (*validateChecksumReader, error) {
hasher, err := NewAlgorithmHash(algorithm)
if err != nil {
return nil, err
}
return &validateChecksumReader{
originalBody: body,
body: io.TeeReader(body, hasher),
hasher: hasher,
algorithm: algorithm,
expectChecksum: expectChecksum,
}, nil
}
// Read attempts to read from the underlying stream while also updating the
// running hash. If the underlying stream returns with an EOF error, the
// checksum of the stream will be collected, and compared against the expected
// checksum. If the checksums do not match, an error will be returned.
//
// If a non-EOF error occurs when reading the underlying stream, that error
// will be returned and the checksum for the stream will be discarded.
func (c *validateChecksumReader) Read(p []byte) (n int, err error) {
n, err = c.body.Read(p)
if err == io.EOF {
if checksumErr := c.validateChecksum(); checksumErr != nil {
return n, checksumErr
}
}
return n, err
}
// Close closes the underlying reader, returning any error that occurred in the
// underlying reader.
func (c *validateChecksumReader) Close() (err error) {
return c.originalBody.Close()
}
func (c *validateChecksumReader) validateChecksum() error {
// Compute base64 encoded checksum hash of the payload's read bytes.
v := base64EncodeHashSum(c.hasher)
if e, a := c.expectChecksum, string(v); !strings.EqualFold(e, a) {
return validationError{
Algorithm: c.algorithm, Expect: e, Actual: a,
}
}
return nil
}
type validationError struct {
Algorithm Algorithm
Expect string
Actual string
}
func (v validationError) Error() string {
return fmt.Sprintf("checksum did not match: algorithm %v, expect %v, actual %v",
v.Algorithm, v.Expect, v.Actual)
}

View file

@ -0,0 +1,389 @@
package checksum
import (
"bytes"
"fmt"
"io"
"strconv"
"strings"
)
const (
crlf = "\r\n"
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html
defaultChunkLength = 1024 * 64
awsTrailerHeaderName = "x-amz-trailer"
decodedContentLengthHeaderName = "x-amz-decoded-content-length"
contentEncodingHeaderName = "content-encoding"
awsChunkedContentEncodingHeaderValue = "aws-chunked"
trailerKeyValueSeparator = ":"
)
var (
crlfBytes = []byte(crlf)
finalChunkBytes = []byte("0" + crlf)
)
type awsChunkedEncodingOptions struct {
// The total size of the stream. For unsigned encoding this implies that
// there will only be a single chunk containing the underlying payload,
// unless ChunkLength is also specified.
StreamLength int64
// Set of trailer key:value pairs that will be appended to the end of the
// payload after the end chunk has been written.
Trailers map[string]awsChunkedTrailerValue
// The maximum size of each chunk to be sent. The default value of -1 signals
// that an optimal chunk length will be used automatically. ChunkLength must be
// at least 8KB.
//
// If ChunkLength and StreamLength are both specified, the stream will be
// broken up into ChunkLength chunks. The encoded length of the aws-chunked
// encoding can still be determined as long as all trailers, if any, have a
// fixed length.
ChunkLength int
}
type awsChunkedTrailerValue struct {
// Function to retrieve the value of the trailer. Will only be called after
// the underlying stream returns EOF error.
Get func() (string, error)
// If the length of the value can be pre-determined and is constant,
// specify the length. A value of -1 means the length is unknown, or
// cannot be pre-determined.
Length int
}
// awsChunkedEncoding provides a reader that wraps the payload such that the
// payload is read as a single aws-chunked payload. This reader can only be used
// if the content length of the payload is known. Content-Length is used as the
// size of the single payload chunk. The final chunk and trailing checksum are
// appended at the end.
//
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#sigv4-chunked-body-definition
//
// Here is the aws-chunked payload stream as read from the awsChunkedEncoding
// if the original request stream is "Hello world" and the checksum hash used is SHA256:
//
// <b>\r\n
// Hello world\r\n
// 0\r\n
// x-amz-checksum-sha256:ZOyIygCyaOW6GjVnihtTFtIS9PNmskdyMlNKiuyjfzw=\r\n
// \r\n
type awsChunkedEncoding struct {
options awsChunkedEncodingOptions
encodedStream io.Reader
trailerEncodedLength int
}
// newUnsignedAWSChunkedEncoding returns a new awsChunkedEncoding configured
// for unsigned aws-chunked content encoding. Any additional trailers that need
// to be appended after the end chunk must be included via Trailer
// callbacks.
func newUnsignedAWSChunkedEncoding(
stream io.Reader,
optFns ...func(*awsChunkedEncodingOptions),
) *awsChunkedEncoding {
options := awsChunkedEncodingOptions{
Trailers: map[string]awsChunkedTrailerValue{},
StreamLength: -1,
ChunkLength: -1,
}
for _, fn := range optFns {
fn(&options)
}
var chunkReader io.Reader
if options.ChunkLength != -1 || options.StreamLength == -1 {
if options.ChunkLength == -1 {
options.ChunkLength = defaultChunkLength
}
chunkReader = newBufferedAWSChunkReader(stream, options.ChunkLength)
} else {
chunkReader = newUnsignedChunkReader(stream, options.StreamLength)
}
trailerReader := newAWSChunkedTrailerReader(options.Trailers)
return &awsChunkedEncoding{
options: options,
encodedStream: io.MultiReader(chunkReader,
trailerReader,
bytes.NewBuffer(crlfBytes),
),
trailerEncodedLength: trailerReader.EncodedLength(),
}
}
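The framing documented above can be reproduced byte-for-byte with the standard library. This is a hypothetical standalone sketch of the single-chunk unsigned encoding with a SHA256 trailer, not the reader composition the constructor actually builds:

package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"strconv"
)

func main() {
	payload := "Hello world"
	sum := sha256.Sum256([]byte(payload))
	checksum := base64.StdEncoding.EncodeToString(sum[:])

	var buf bytes.Buffer
	buf.WriteString(strconv.FormatInt(int64(len(payload)), 16) + "\r\n") // chunk header: "b\r\n"
	buf.WriteString(payload + "\r\n")                                    // chunk payload
	buf.WriteString("0\r\n")                                             // end chunk
	buf.WriteString("x-amz-checksum-sha256:" + checksum + "\r\n")        // trailer
	buf.WriteString("\r\n")                                              // encoding terminator

	fmt.Printf("%q\n", buf.String())
}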
// EncodedLength returns the final length of the aws-chunked content encoded
// stream if it can be determined without reading the underlying stream or lazy
// header values, otherwise -1 is returned.
func (e *awsChunkedEncoding) EncodedLength() int64 {
var length int64
if e.options.StreamLength == -1 || e.trailerEncodedLength == -1 {
return -1
}
if e.options.StreamLength != 0 {
// If the stream length is known, and there is no chunk length specified,
// only a single chunk will be used. Otherwise the stream length needs to
// include the multiple chunk padding content.
if e.options.ChunkLength == -1 {
length += getUnsignedChunkBytesLength(e.options.StreamLength)
} else {
// Compute chunk header and payload length
numChunks := e.options.StreamLength / int64(e.options.ChunkLength)
length += numChunks * getUnsignedChunkBytesLength(int64(e.options.ChunkLength))
if remainder := e.options.StreamLength % int64(e.options.ChunkLength); remainder != 0 {
length += getUnsignedChunkBytesLength(remainder)
}
}
}
// End chunk
length += int64(len(finalChunkBytes))
// Trailers
length += int64(e.trailerEncodedLength)
// Encoding terminator
length += int64(len(crlf))
return length
}
func getUnsignedChunkBytesLength(payloadLength int64) int64 {
payloadLengthStr := strconv.FormatInt(payloadLength, 16)
return int64(len(payloadLengthStr)) + int64(len(crlf)) + payloadLength + int64(len(crlf))
}
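As a worked example of the arithmetic in EncodedLength and getUnsignedChunkBytesLength: for the 11-byte "Hello world" payload, a single unsigned chunk, and a SHA256 trailer, the encoded length works out to 89 bytes. A small sketch that mirrors the formula:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	const crlf = "\r\n"
	payload := int64(len("Hello world")) // 11 bytes, hex chunk header "b"

	chunk := int64(len(strconv.FormatInt(payload, 16))) + 2 + payload + 2 // "b\r\nHello world\r\n" = 16
	endChunk := int64(len("0" + crlf))                                    // 3
	trailer := int64(len("x-amz-checksum-sha256:")) + 44 + 2              // name + 44-byte base64 SHA256 + CRLF = 68
	terminator := int64(len(crlf))                                        // 2

	fmt.Println(chunk + endChunk + trailer + terminator) // 89
}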
// HTTPHeaders returns the set of headers that must be included in the request for
// aws-chunked to work. This includes the content-encoding: aws-chunked header.
//
// If there are multiple layered content encodings, the aws-chunked encoding
// must be appended to the previous layers of the stream's encoding. The best way
// to do this is to append all header values returned to the HTTP request's set
// of headers.
func (e *awsChunkedEncoding) HTTPHeaders() map[string][]string {
headers := map[string][]string{
contentEncodingHeaderName: {
awsChunkedContentEncodingHeaderValue,
},
}
if len(e.options.Trailers) != 0 {
trailers := make([]string, 0, len(e.options.Trailers))
for name := range e.options.Trailers {
trailers = append(trailers, strings.ToLower(name))
}
headers[awsTrailerHeaderName] = trailers
}
return headers
}
func (e *awsChunkedEncoding) Read(b []byte) (n int, err error) {
return e.encodedStream.Read(b)
}
// awsChunkedTrailerReader provides a lazy reader for reading of aws-chunked
// content encoded trailers. The trailer values will not be retrieved until the
// reader is read from.
type awsChunkedTrailerReader struct {
reader *bytes.Buffer
trailers map[string]awsChunkedTrailerValue
trailerEncodedLength int
}
// newAWSChunkedTrailerReader returns an initialized awsChunkedTrailerReader for
// lazily reading aws-chunked content encoded trailers.
func newAWSChunkedTrailerReader(trailers map[string]awsChunkedTrailerValue) *awsChunkedTrailerReader {
return &awsChunkedTrailerReader{
trailers: trailers,
trailerEncodedLength: trailerEncodedLength(trailers),
}
}
func trailerEncodedLength(trailers map[string]awsChunkedTrailerValue) (length int) {
for name, trailer := range trailers {
length += len(name) + len(trailerKeyValueSeparator)
l := trailer.Length
if l == -1 {
return -1
}
length += l + len(crlf)
}
return length
}
// EncodedLength returns the length of the encoded trailers if the length could
// be determined without retrieving the header values. Returns -1 if length is
// unknown.
func (r *awsChunkedTrailerReader) EncodedLength() (length int) {
return r.trailerEncodedLength
}
// Read populates the passed in byte slice with bytes from the encoded
// trailers. Will lazily read trailer values the first time Read is called.
func (r *awsChunkedTrailerReader) Read(p []byte) (int, error) {
if r.trailerEncodedLength == 0 {
return 0, io.EOF
}
if r.reader == nil {
trailerLen := r.trailerEncodedLength
if r.trailerEncodedLength == -1 {
trailerLen = 0
}
r.reader = bytes.NewBuffer(make([]byte, 0, trailerLen))
for name, trailer := range r.trailers {
r.reader.WriteString(name)
r.reader.WriteString(trailerKeyValueSeparator)
v, err := trailer.Get()
if err != nil {
return 0, fmt.Errorf("failed to get trailer value, %w", err)
}
r.reader.WriteString(v)
r.reader.WriteString(crlf)
}
}
return r.reader.Read(p)
}
// newUnsignedChunkReader returns an io.Reader encoding the underlying reader
// as unsigned aws-chunked chunks. The returned reader will also include the
// end chunk, but not the aws-chunked final `crlf` segment so trailers can be
// added.
//
// If the payload size is -1 (unknown length), the content will be buffered in
// defaultChunkLength chunks before being wrapped in aws-chunked chunk encoding.
func newUnsignedChunkReader(reader io.Reader, payloadSize int64) io.Reader {
if payloadSize == -1 {
return newBufferedAWSChunkReader(reader, defaultChunkLength)
}
var endChunk bytes.Buffer
if payloadSize == 0 {
endChunk.Write(finalChunkBytes)
return &endChunk
}
endChunk.WriteString(crlf)
endChunk.Write(finalChunkBytes)
var header bytes.Buffer
header.WriteString(strconv.FormatInt(payloadSize, 16))
header.WriteString(crlf)
return io.MultiReader(
&header,
reader,
&endChunk,
)
}
// Provides a buffered aws-chunked chunk encoder of an underlying io.Reader.
// Will include end chunk, but not the aws-chunked final `crlf` segment so
// trailers can be added.
//
// Note: this does not implement support for chunk extensions, e.g. chunk signing.
type bufferedAWSChunkReader struct {
reader io.Reader
chunkSize int
chunkSizeStr string
headerBuffer *bytes.Buffer
chunkBuffer *bytes.Buffer
multiReader io.Reader
multiReaderLen int
endChunkDone bool
}
// newBufferedAWSChunkReader returns a bufferedAWSChunkReader for reading
// aws-chunked encoded chunks.
func newBufferedAWSChunkReader(reader io.Reader, chunkSize int) *bufferedAWSChunkReader {
return &bufferedAWSChunkReader{
reader: reader,
chunkSize: chunkSize,
chunkSizeStr: strconv.FormatInt(int64(chunkSize), 16),
headerBuffer: bytes.NewBuffer(make([]byte, 0, 64)),
chunkBuffer: bytes.NewBuffer(make([]byte, 0, chunkSize+len(crlf))),
}
}
// Read attempts to read from the underlying io.Reader, writing aws-chunked
// chunk encoded bytes to p. When the underlying io.Reader has been completely
// read, the end chunk will be available. Once the end chunk is read, the reader
// will return EOF.
func (r *bufferedAWSChunkReader) Read(p []byte) (n int, err error) {
if r.multiReaderLen == 0 && r.endChunkDone {
return 0, io.EOF
}
if r.multiReader == nil || r.multiReaderLen == 0 {
r.multiReader, r.multiReaderLen, err = r.newMultiReader()
if err != nil {
return 0, err
}
}
n, err = r.multiReader.Read(p)
r.multiReaderLen -= n
if err == io.EOF && !r.endChunkDone {
// Edge case handling when the multi-reader has been completely read,
// and returned an EOF, make sure that EOF only gets returned if the
// end chunk was included in the multi-reader. Otherwise, the next call
// to read will initialize the next chunk's multi-reader.
err = nil
}
return n, err
}
// newMultiReader returns a new io.Reader for wrapping the next chunk. Will
// return an error if the underlying reader can not be read from. Will never
// return io.EOF.
func (r *bufferedAWSChunkReader) newMultiReader() (io.Reader, int, error) {
// io.Copy eats the io.EOF returned by io.LimitReader. Any error that
// occurs here is due to an actual read error.
n, err := io.Copy(r.chunkBuffer, io.LimitReader(r.reader, int64(r.chunkSize)))
if err != nil {
return nil, 0, err
}
if n == 0 {
// Early exit writing out only the end chunk. This does not include
// aws-chunk's final `crlf` so that trailers can still be added by
// upstream reader.
r.headerBuffer.Reset()
r.headerBuffer.WriteString("0")
r.headerBuffer.WriteString(crlf)
r.endChunkDone = true
return r.headerBuffer, r.headerBuffer.Len(), nil
}
r.chunkBuffer.WriteString(crlf)
chunkSizeStr := r.chunkSizeStr
if int(n) != r.chunkSize {
chunkSizeStr = strconv.FormatInt(n, 16)
}
r.headerBuffer.Reset()
r.headerBuffer.WriteString(chunkSizeStr)
r.headerBuffer.WriteString(crlf)
return io.MultiReader(
r.headerBuffer,
r.chunkBuffer,
), r.headerBuffer.Len() + r.chunkBuffer.Len(), nil
}
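For the multi-chunk case bufferedAWSChunkReader handles, a 10-byte payload with an 8-byte chunk size produces an 8-byte chunk, a 2-byte chunk, and the end chunk. A standalone sketch of that framing (trailers and the final CRLF are added by the callers shown earlier):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strconv"
	"strings"
)

func main() {
	payload := strings.NewReader("0123456789")
	const chunkSize = 8

	var out bytes.Buffer
	for {
		var chunk bytes.Buffer
		n, err := io.Copy(&chunk, io.LimitReader(payload, chunkSize))
		if err != nil {
			panic(err)
		}
		if n == 0 {
			out.WriteString("0\r\n") // end chunk, as written by newMultiReader
			break
		}
		out.WriteString(strconv.FormatInt(n, 16) + "\r\n") // chunk header
		out.Write(chunk.Bytes())
		out.WriteString("\r\n")
	}
	fmt.Printf("%q\n", out.String()) // "8\r\n01234567\r\n2\r\n89\r\n0\r\n"
}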

View file

@ -0,0 +1,6 @@
// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
package checksum
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.1.18"

View file

@ -0,0 +1,185 @@
package checksum
import (
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// InputMiddlewareOptions provides the options for the request
// checksum middleware setup.
type InputMiddlewareOptions struct {
// GetAlgorithm is a function to get the checksum algorithm of the
// input payload from the input parameters.
//
// Given the input parameter value, the function must return the algorithm
// and true, or false if no algorithm is specified.
GetAlgorithm func(interface{}) (string, bool)
// Forces the middleware to compute the input payload's checksum. The
// request will fail if the algorithm is not specified or unable to compute
// the checksum.
RequireChecksum bool
// Enables support for wrapping the serialized input payload with a
// content-encoding: aws-chunked wrapper, and including a trailer for the
// algorithm's checksum value.
//
// The checksum will not be computed, nor added as trailing checksum, if
// the Algorithm's header is already set on the request.
EnableTrailingChecksum bool
// Enables support for computing the SHA256 checksum of input payloads
// along with the algorithm specified checksum. Prevents downstream
// middleware handlers (computePayloadSHA256) re-reading the payload.
//
// The SHA256 payload checksum will only be computed for requests
// that are not TLS, or that do not enable trailing checksums.
//
// The SHA256 payload hash will not be computed, if the Algorithm's header
// is already set on the request.
EnableComputeSHA256PayloadHash bool
// Enables support for setting the aws-chunked decoded content length
// header for the decoded length of the underlying stream. Will only be set
// when used with trailing checksums, and aws-chunked content-encoding.
EnableDecodedContentLengthHeader bool
}
// AddInputMiddleware adds the middleware for computing checksums
// of request payloads, and checksum validation of response payloads.
func AddInputMiddleware(stack *middleware.Stack, options InputMiddlewareOptions) (err error) {
// TODO ensure this works correctly with presigned URLs
// Middleware stack:
// * (OK)(Initialize) --none--
// * (OK)(Serialize) EndpointResolver
// * (OK)(Build) ComputeContentLength
// * (AD)(Build) Header ComputeInputPayloadChecksum
// * SIGNED Payload - If HTTP && not support trailing checksum
// * UNSIGNED Payload - If HTTPS && not support trailing checksum
// * (RM)(Build) ContentChecksum - OK to remove
// * (OK)(Build) ComputePayloadHash
// * v4.dynamicPayloadSigningMiddleware
// * v4.computePayloadSHA256
// * v4.unsignedPayload
// (OK)(Build) Set computedPayloadHash header
// * (OK)(Finalize) Retry
// * (AD)(Finalize) Trailer ComputeInputPayloadChecksum,
// * Requires HTTPS && support trailing checksum
// * UNSIGNED Payload
// * Finalize run if HTTPS && support trailing checksum
// * (OK)(Finalize) Signing
// * (OK)(Deserialize) --none--
// Initial checksum configuration look up middleware
err = stack.Initialize.Add(&setupInputContext{
GetAlgorithm: options.GetAlgorithm,
}, middleware.Before)
if err != nil {
return err
}
stack.Build.Remove("ContentChecksum")
// Create the compute checksum middleware that will be added as both a
// build and finalize handler.
inputChecksum := &computeInputPayloadChecksum{
RequireChecksum: options.RequireChecksum,
EnableTrailingChecksum: options.EnableTrailingChecksum,
EnableComputePayloadHash: options.EnableComputeSHA256PayloadHash,
EnableDecodedContentLengthHeader: options.EnableDecodedContentLengthHeader,
}
// Insert header checksum after ComputeContentLength middleware, must also
// be before the computePayloadHash middleware handlers.
err = stack.Build.Insert(inputChecksum,
(*smithyhttp.ComputeContentLength)(nil).ID(),
middleware.After)
if err != nil {
return err
}
// If trailing checksum is not supported no need for finalize handler to be added.
if options.EnableTrailingChecksum {
err = stack.Finalize.Insert(inputChecksum, "Retry", middleware.After)
if err != nil {
return err
}
}
return nil
}
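A hedged usage sketch of the wiring above. The checksum package lives under service/internal and cannot be imported from application code, so this is illustrative only; it assumes smithy-go's middleware.NewStack, smithyhttp.NewStackRequest, and smithyhttp.AddComputeContentLengthMiddleware helpers, since the Build insert above is relative to the ComputeContentLength middleware:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/service/internal/checksum"
	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

func main() {
	stack := middleware.NewStack("PutObject", smithyhttp.NewStackRequest)

	// ComputeContentLength must be present, because AddInputMiddleware inserts
	// the header checksum handler relative to it.
	if err := smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
		panic(err)
	}

	err := checksum.AddInputMiddleware(stack, checksum.InputMiddlewareOptions{
		// Always request CRC32 here; a generated client would read this from
		// the operation input instead.
		GetAlgorithm: func(interface{}) (string, bool) { return "CRC32", true },
		// Trailing checksums are left disabled so the sketch does not also
		// need a Retry middleware registered in the Finalize step.
		EnableTrailingChecksum:         false,
		EnableComputeSHA256PayloadHash: true,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("input checksum middleware registered")
}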
// RemoveInputMiddleware removes the compute input payload checksum middleware
// handlers from the stack.
func RemoveInputMiddleware(stack *middleware.Stack) {
id := (*setupInputContext)(nil).ID()
stack.Initialize.Remove(id)
id = (*computeInputPayloadChecksum)(nil).ID()
stack.Build.Remove(id)
stack.Finalize.Remove(id)
}
// OutputMiddlewareOptions provides options for configuring output checksum
// validation middleware.
type OutputMiddlewareOptions struct {
// GetValidationMode is a function to get the checksum validation
// mode of the output payload from the input parameters.
//
// Given the input parameter value, the function must return the validation
// mode and true, or false if no mode is specified.
GetValidationMode func(interface{}) (string, bool)
// The set of checksum algorithms that should be used for response payload
// checksum validation. The algorithm(s) used will be a union of the
// output's returned algorithms and this set.
//
// Only the first algorithm in the union is currently used.
ValidationAlgorithms []string
// If set the middleware will ignore output multipart checksums. Otherwise
// a checksum format error will be returned by the middleware.
IgnoreMultipartValidation bool
// When set the middleware will log when output does not have checksum or
// algorithm to validate.
LogValidationSkipped bool
// When set the middleware will log when the output contains a multipart
// checksum that was skipped and not validated.
LogMultipartValidationSkipped bool
}
// AddOutputMiddleware adds the middleware for validating response payload's
// checksum.
func AddOutputMiddleware(stack *middleware.Stack, options OutputMiddlewareOptions) error {
err := stack.Initialize.Add(&setupOutputContext{
GetValidationMode: options.GetValidationMode,
}, middleware.Before)
if err != nil {
return err
}
// Resolve a supported priority order list of algorithms to validate.
algorithms := FilterSupportedAlgorithms(options.ValidationAlgorithms)
m := &validateOutputPayloadChecksum{
Algorithms: algorithms,
IgnoreMultipartValidation: options.IgnoreMultipartValidation,
LogMultipartValidationSkipped: options.LogMultipartValidationSkipped,
LogValidationSkipped: options.LogValidationSkipped,
}
return stack.Deserialize.Add(m, middleware.After)
}
// RemoveOutputMiddleware removes the output payload checksum validation middleware
// handlers from the stack.
func RemoveOutputMiddleware(stack *middleware.Stack) {
id := (*setupOutputContext)(nil).ID()
stack.Initialize.Remove(id)
id = (*validateOutputPayloadChecksum)(nil).ID()
stack.Deserialize.Remove(id)
}

View file

@ -0,0 +1,479 @@
package checksum
import (
"context"
"crypto/sha256"
"fmt"
"hash"
"io"
"strconv"
v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
const (
contentMD5Header = "Content-Md5"
streamingUnsignedPayloadTrailerPayloadHash = "STREAMING-UNSIGNED-PAYLOAD-TRAILER"
)
// computedInputChecksumsKey is the metadata key for recording the algorithm the
// checksum was computed for and the checksum value.
type computedInputChecksumsKey struct{}
// GetComputedInputChecksums returns the map of checksum algorithm to their
// computed value stored in the middleware Metadata. Returns false if no values
// were stored in the Metadata.
func GetComputedInputChecksums(m middleware.Metadata) (map[string]string, bool) {
vs, ok := m.Get(computedInputChecksumsKey{}).(map[string]string)
return vs, ok
}
// SetComputedInputChecksums stores the map of checksum algorithm to their
// computed value in the middleware Metadata. Overwrites any values that
// currently exist in the metadata.
func SetComputedInputChecksums(m *middleware.Metadata, vs map[string]string) {
m.Set(computedInputChecksumsKey{}, vs)
}
// computeInputPayloadChecksum middleware computes payload checksum
type computeInputPayloadChecksum struct {
// Enables support for wrapping the serialized input payload with a
// content-encoding: aws-chunked wrapper, and including a trailer for the
// algorithm's checksum value.
//
// The checksum will not be computed, nor added as trailing checksum, if
// the Algorithm's header is already set on the request.
EnableTrailingChecksum bool
// States that a checksum is required to be included for the operation. If
// the Input does not specify a checksum, a fallback to the built-in MD5 checksum is
// used.
//
// Replaces smithy-go's ContentChecksum middleware.
RequireChecksum bool
// Enables support for computing the SHA256 checksum of input payloads
// along with the algorithm specified checksum. Prevents downstream
// middleware handlers (computePayloadSHA256) re-reading the payload.
//
// The SHA256 payload hash will only be computed for requests
// that are not TLS, or that do not enable trailing checksums.
//
// The SHA256 payload hash will not be computed, if the Algorithm's header
// is already set on the request.
EnableComputePayloadHash bool
// Enables support for setting the aws-chunked decoded content length
// header for the decoded length of the underlying stream. Will only be set
// when used with trailing checksums, and aws-chunked content-encoding.
EnableDecodedContentLengthHeader bool
buildHandlerRun bool
deferToFinalizeHandler bool
}
// ID provides the middleware's identifier.
func (m *computeInputPayloadChecksum) ID() string {
return "AWSChecksum:ComputeInputPayloadChecksum"
}
type computeInputHeaderChecksumError struct {
Msg string
Err error
}
func (e computeInputHeaderChecksumError) Error() string {
const intro = "compute input header checksum failed"
if e.Err != nil {
return fmt.Sprintf("%s, %s, %v", intro, e.Msg, e.Err)
}
return fmt.Sprintf("%s, %s", intro, e.Msg)
}
func (e computeInputHeaderChecksumError) Unwrap() error { return e.Err }
// HandleBuild handles computing the payload's checksum, in the following cases:
// - Is HTTP, not HTTPS
// - RequireChecksum is true, and no checksums were specified via the Input
// - Trailing checksums are not supported
//
// The build handler must be inserted in the stack before ContentPayloadHash
// and after ComputeContentLength.
func (m *computeInputPayloadChecksum) HandleBuild(
ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
) (
out middleware.BuildOutput, metadata middleware.Metadata, err error,
) {
m.buildHandlerRun = true
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, computeInputHeaderChecksumError{
Msg: fmt.Sprintf("unknown request type %T", req),
}
}
var algorithm Algorithm
var checksum string
defer func() {
if algorithm == "" || checksum == "" || err != nil {
return
}
// Record the checksum and algorithm that was computed
SetComputedInputChecksums(&metadata, map[string]string{
string(algorithm): checksum,
})
}()
// If no algorithm was specified, and the operation requires a checksum,
// fallback to the legacy content MD5 checksum.
algorithm, ok, err = getInputAlgorithm(ctx)
if err != nil {
return out, metadata, err
} else if !ok {
if m.RequireChecksum {
checksum, err = setMD5Checksum(ctx, req)
if err != nil {
return out, metadata, computeInputHeaderChecksumError{
Msg: "failed to compute stream's MD5 checksum",
Err: err,
}
}
algorithm = Algorithm("MD5")
}
return next.HandleBuild(ctx, in)
}
// If the checksum header is already set nothing to do.
checksumHeader := AlgorithmHTTPHeader(algorithm)
if checksum = req.Header.Get(checksumHeader); checksum != "" {
return next.HandleBuild(ctx, in)
}
computePayloadHash := m.EnableComputePayloadHash
if v := v4.GetPayloadHash(ctx); v != "" {
computePayloadHash = false
}
stream := req.GetStream()
streamLength, err := getRequestStreamLength(req)
if err != nil {
return out, metadata, computeInputHeaderChecksumError{
Msg: "failed to determine stream length",
Err: err,
}
}
// If trailing checksums are supported, the request is HTTPS, and the
// stream is not nil or empty, there is nothing to do in the build stage.
// The checksum will be added to the request as a trailing checksum in the
// finalize handler.
//
// Nil and empty streams will always be handled as a request header,
// regardless if the operation supports trailing checksums or not.
if req.IsHTTPS() {
if stream != nil && streamLength != 0 && m.EnableTrailingChecksum {
if m.EnableComputePayloadHash {
// payload hash is set as header in Build middleware handler,
// ContentSHA256Header.
ctx = v4.SetPayloadHash(ctx, streamingUnsignedPayloadTrailerPayloadHash)
}
m.deferToFinalizeHandler = true
return next.HandleBuild(ctx, in)
}
// If trailing checksums are not enabled but the protocol is still HTTPS,
// disable computing the payload hash. The downstream middleware handler
// (ComputePayloadHash) will set the payload hash to unsigned payload,
// if signing was used.
computePayloadHash = false
}
// Only seekable streams are supported for non-trailing checksums, because
// the stream needs to be rewound before the handler can continue.
if stream != nil && !req.IsStreamSeekable() {
return out, metadata, computeInputHeaderChecksumError{
Msg: "unseekable stream is not supported without TLS and trailing checksum",
}
}
var sha256Checksum string
checksum, sha256Checksum, err = computeStreamChecksum(
algorithm, stream, computePayloadHash)
if err != nil {
return out, metadata, computeInputHeaderChecksumError{
Msg: "failed to compute stream checksum",
Err: err,
}
}
if err := req.RewindStream(); err != nil {
return out, metadata, computeInputHeaderChecksumError{
Msg: "failed to rewind stream",
Err: err,
}
}
req.Header.Set(checksumHeader, checksum)
if computePayloadHash {
ctx = v4.SetPayloadHash(ctx, sha256Checksum)
}
return next.HandleBuild(ctx, in)
}
type computeInputTrailingChecksumError struct {
Msg string
Err error
}
func (e computeInputTrailingChecksumError) Error() string {
const intro = "compute input trailing checksum failed"
if e.Err != nil {
return fmt.Sprintf("%s, %s, %v", intro, e.Msg, e.Err)
}
return fmt.Sprintf("%s, %s", intro, e.Msg)
}
func (e computeInputTrailingChecksumError) Unwrap() error { return e.Err }
// HandleFinalize handles computing the payload's checksum, in the following cases:
// - Is HTTPS, not HTTP
// - A checksum was specified via the Input
// - Trailing checksums are supported.
//
// The finalize handler must be inserted in the stack before Signing, and after Retry.
func (m *computeInputPayloadChecksum) HandleFinalize(
ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
) (
out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
) {
if !m.deferToFinalizeHandler {
if !m.buildHandlerRun {
return out, metadata, computeInputTrailingChecksumError{
Msg: "build handler was removed without also removing finalize handler",
}
}
return next.HandleFinalize(ctx, in)
}
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, computeInputTrailingChecksumError{
Msg: fmt.Sprintf("unknown request type %T", req),
}
}
// Trailing checksums are only supported when TLS is enabled.
if !req.IsHTTPS() {
return out, metadata, computeInputTrailingChecksumError{
Msg: "HTTPS required",
}
}
// If no algorithm was specified, there is nothing to do.
algorithm, ok, err := getInputAlgorithm(ctx)
if err != nil {
return out, metadata, computeInputTrailingChecksumError{
Msg: "failed to get algorithm",
Err: err,
}
} else if !ok {
return out, metadata, computeInputTrailingChecksumError{
Msg: "no algorithm specified",
}
}
// If the checksum header is already set before finalize could run, there
// is nothing to do.
checksumHeader := AlgorithmHTTPHeader(algorithm)
if req.Header.Get(checksumHeader) != "" {
return next.HandleFinalize(ctx, in)
}
stream := req.GetStream()
streamLength, err := getRequestStreamLength(req)
if err != nil {
return out, metadata, computeInputTrailingChecksumError{
Msg: "failed to determine stream length",
Err: err,
}
}
if stream == nil || streamLength == 0 {
// Nil and empty streams are handled by the Build handler. They are not
// supported by the trailing checksums finalize handler. There is no
// benefit to sending them as trailers compared to headers.
return out, metadata, computeInputTrailingChecksumError{
Msg: "nil or empty streams are not supported",
}
}
checksumReader, err := newComputeChecksumReader(stream, algorithm)
if err != nil {
return out, metadata, computeInputTrailingChecksumError{
Msg: "failed to created checksum reader",
Err: err,
}
}
awsChunkedReader := newUnsignedAWSChunkedEncoding(checksumReader,
func(o *awsChunkedEncodingOptions) {
o.Trailers[AlgorithmHTTPHeader(checksumReader.Algorithm())] = awsChunkedTrailerValue{
Get: checksumReader.Base64Checksum,
Length: checksumReader.Base64ChecksumLength(),
}
o.StreamLength = streamLength
})
for key, values := range awsChunkedReader.HTTPHeaders() {
for _, value := range values {
req.Header.Add(key, value)
}
}
// Setting the stream on the request will create a copy. The content length
// is not updated until after the request is copied to prevent impacting
// upstream middleware.
req, err = req.SetStream(awsChunkedReader)
if err != nil {
return out, metadata, computeInputTrailingChecksumError{
Msg: "failed updating request to trailing checksum wrapped stream",
Err: err,
}
}
req.ContentLength = awsChunkedReader.EncodedLength()
in.Request = req
// Add decoded content length header if original stream's content length is known.
if streamLength != -1 && m.EnableDecodedContentLengthHeader {
req.Header.Set(decodedContentLengthHeaderName, strconv.FormatInt(streamLength, 10))
}
out, metadata, err = next.HandleFinalize(ctx, in)
if err == nil {
checksum, err := checksumReader.Base64Checksum()
if err != nil {
return out, metadata, fmt.Errorf("failed to get computed checksum, %w", err)
}
// Record the checksum and algorithm that was computed
SetComputedInputChecksums(&metadata, map[string]string{
string(algorithm): checksum,
})
}
return out, metadata, err
}
func getInputAlgorithm(ctx context.Context) (Algorithm, bool, error) {
ctxAlgorithm := getContextInputAlgorithm(ctx)
if ctxAlgorithm == "" {
return "", false, nil
}
algorithm, err := ParseAlgorithm(ctxAlgorithm)
if err != nil {
return "", false, fmt.Errorf(
"failed to parse algorithm, %w", err)
}
return algorithm, true, nil
}
func computeStreamChecksum(algorithm Algorithm, stream io.Reader, computePayloadHash bool) (
checksum string, sha256Checksum string, err error,
) {
hasher, err := NewAlgorithmHash(algorithm)
if err != nil {
return "", "", fmt.Errorf(
"failed to get hasher for checksum algorithm, %w", err)
}
var sha256Hasher hash.Hash
var batchHasher io.Writer = hasher
// Compute the payload hash for the protocol. To prevent another handler
// (computePayloadSHA256) re-reading the body, also compute the SHA256 for
// request signing. If the configured checksum algorithm is SHA256, don't
// double wrap the stream with another SHA256 hasher.
if computePayloadHash && algorithm != AlgorithmSHA256 {
sha256Hasher = sha256.New()
batchHasher = io.MultiWriter(hasher, sha256Hasher)
}
if stream != nil {
if _, err = io.Copy(batchHasher, stream); err != nil {
return "", "", fmt.Errorf(
"failed to read stream to compute hash, %w", err)
}
}
checksum = string(base64EncodeHashSum(hasher))
if computePayloadHash {
if algorithm != AlgorithmSHA256 {
sha256Checksum = string(hexEncodeHashSum(sha256Hasher))
} else {
sha256Checksum = string(hexEncodeHashSum(hasher))
}
}
return checksum, sha256Checksum, nil
}
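The single-pass double hash computeStreamChecksum performs can be sketched with io.MultiWriter and the standard library alone: the CRC32 value becomes the x-amz-checksum header value, and the hex SHA256 becomes the SigV4 payload hash, without re-reading the payload:

package main

import (
	"crypto/sha256"
	"encoding/base64"
	"encoding/hex"
	"fmt"
	"hash/crc32"
	"io"
	"strings"
)

func main() {
	crc := crc32.NewIEEE()
	sha := sha256.New()
	// One read of the stream feeds both hashes, mirroring the io.MultiWriter
	// batching above.
	if _, err := io.Copy(io.MultiWriter(crc, sha), strings.NewReader("Hello world")); err != nil {
		panic(err)
	}
	fmt.Println("x-amz-checksum-crc32:", base64.StdEncoding.EncodeToString(crc.Sum(nil)))
	fmt.Println("sigv4 payload hash:", hex.EncodeToString(sha.Sum(nil)))
}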
func getRequestStreamLength(req *smithyhttp.Request) (int64, error) {
if v := req.ContentLength; v > 0 {
return v, nil
}
if length, ok, err := req.StreamLength(); err != nil {
return 0, fmt.Errorf("failed getting request stream's length, %w", err)
} else if ok {
return length, nil
}
return -1, nil
}
// setMD5Checksum computes the MD5 of the request payload and sets it to the
// Content-MD5 header. It returns the base64 encoded MD5 string, or an error.
//
// If the MD5 is already set as the Content-MD5 header, that value will be
// returned, and nothing else will be done.
//
// If the payload is empty, no MD5 will be computed. No error will be returned.
// Empty payloads do not have an MD5 value.
//
// Replaces the smithy-go middleware for httpChecksum trait.
func setMD5Checksum(ctx context.Context, req *smithyhttp.Request) (string, error) {
if v := req.Header.Get(contentMD5Header); len(v) != 0 {
return v, nil
}
stream := req.GetStream()
if stream == nil {
return "", nil
}
if !req.IsStreamSeekable() {
return "", fmt.Errorf(
"unseekable stream is not supported for computing md5 checksum")
}
v, err := computeMD5Checksum(stream)
if err != nil {
return "", err
}
if err := req.RewindStream(); err != nil {
return "", fmt.Errorf("failed to rewind stream after computing MD5 checksum, %w", err)
}
// set the 'Content-MD5' header
req.Header.Set(contentMD5Header, string(v))
return string(v), nil
}

View file

@ -0,0 +1,117 @@
package checksum
import (
"context"
"github.com/aws/smithy-go/middleware"
)
// setupInputContext is the initial middleware that looks up the input
// used to configure checksum behavior. This middleware must be executed before
// the input validation step or any other checksum middleware.
type setupInputContext struct {
// GetAlgorithm is a function to get the checksum algorithm of the
// input payload from the input parameters.
//
// Given the input parameter value, the function must return the algorithm
// and true, or false if no algorithm is specified.
GetAlgorithm func(interface{}) (string, bool)
}
// ID for the middleware
func (m *setupInputContext) ID() string {
return "AWSChecksum:SetupInputContext"
}
// HandleInitialize is the initialization middleware that sets up the checksum
// context based on the input parameters provided in the stack.
func (m *setupInputContext) HandleInitialize(
ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
// Check if validation algorithm is specified.
if m.GetAlgorithm != nil {
// check if the input resource has a checksum algorithm
algorithm, ok := m.GetAlgorithm(in.Parameters)
if ok && len(algorithm) != 0 {
ctx = setContextInputAlgorithm(ctx, algorithm)
}
}
return next.HandleInitialize(ctx, in)
}
// inputAlgorithmKey is the key set on the context used to identify and retrieve the
// request checksum algorithm if present on the context.
type inputAlgorithmKey struct{}
// setContextInputAlgorithm sets the request checksum algorithm on the context.
//
// Scoped to stack values.
func setContextInputAlgorithm(ctx context.Context, value string) context.Context {
return middleware.WithStackValue(ctx, inputAlgorithmKey{}, value)
}
// getContextInputAlgorithm returns the checksum algorithm from the context if
// one was specified. Empty string is returned if one is not specified.
//
// Scoped to stack values.
func getContextInputAlgorithm(ctx context.Context) (v string) {
v, _ = middleware.GetStackValue(ctx, inputAlgorithmKey{}).(string)
return v
}
type setupOutputContext struct {
// GetValidationMode is a function to get the checksum validation
// mode of the output payload from the input parameters.
//
// Given the input parameter value, the function must return the validation
// mode and true, or false if no mode is specified.
GetValidationMode func(interface{}) (string, bool)
}
// ID for the middleware
func (m *setupOutputContext) ID() string {
return "AWSChecksum:SetupOutputContext"
}
// HandleInitialize is the initialization middleware that sets up the checksum
// context based on the input parameters provided in the stack.
func (m *setupOutputContext) HandleInitialize(
ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
// Check if validation mode is specified.
if m.GetValidationMode != nil {
// check if the input resource has a checksum validation mode
mode, ok := m.GetValidationMode(in.Parameters)
if ok && len(mode) != 0 {
ctx = setContextOutputValidationMode(ctx, mode)
}
}
return next.HandleInitialize(ctx, in)
}
// outputValidationModeKey is the key set on context used to identify if
// output checksum validation is enabled.
type outputValidationModeKey struct{}
// setContextOutputValidationMode sets the response checksum validation
// mode on the context.
//
// Scoped to stack values.
func setContextOutputValidationMode(ctx context.Context, value string) context.Context {
return middleware.WithStackValue(ctx, outputValidationModeKey{}, value)
}
// getContextOutputValidationMode returns response checksum validation state,
// if one was specified. Empty string is returned if one is not specified.
//
// Scoped to stack values.
func getContextOutputValidationMode(ctx context.Context) (v string) {
v, _ = middleware.GetStackValue(ctx, outputValidationModeKey{}).(string)
return v
}
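// Hypothetical sketch of the accessor functions a generated service client
// would hand to setupInputContext and setupOutputContext above. The
// putObjectInput type and its fields are illustrative stand-ins, not the real
// generated S3 types.
package main

import "fmt"

type putObjectInput struct {
	ChecksumAlgorithm string // e.g. "CRC32C"; empty means unspecified
	ChecksumMode      string // e.g. "ENABLED"; empty means unspecified
}

func getAlgorithm(in interface{}) (string, bool) {
	v, ok := in.(*putObjectInput)
	if !ok || v.ChecksumAlgorithm == "" {
		return "", false
	}
	return v.ChecksumAlgorithm, true
}

func getValidationMode(in interface{}) (string, bool) {
	v, ok := in.(*putObjectInput)
	if !ok || v.ChecksumMode == "" {
		return "", false
	}
	return v.ChecksumMode, true
}

func main() {
	in := &putObjectInput{ChecksumAlgorithm: "CRC32C", ChecksumMode: "ENABLED"}
	if alg, ok := getAlgorithm(in); ok {
		fmt.Println("request checksum algorithm:", alg)
	}
	if mode, ok := getValidationMode(in); ok {
		fmt.Println("response validation mode:", mode)
	}
}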


@ -0,0 +1,131 @@
package checksum
import (
"context"
"fmt"
"strings"
"github.com/aws/smithy-go"
"github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// outputValidationAlgorithmsUsedKey is the metadata key for indexing the algorithms
// that were used by the middleware's validation.
type outputValidationAlgorithmsUsedKey struct{}
// GetOutputValidationAlgorithmsUsed returns the checksum algorithms used, as
// stored in the middleware Metadata. Returns false if no algorithms were
// stored in the Metadata.
func GetOutputValidationAlgorithmsUsed(m middleware.Metadata) ([]string, bool) {
vs, ok := m.Get(outputValidationAlgorithmsUsedKey{}).([]string)
return vs, ok
}
// SetOutputValidationAlgorithmsUsed stores the checksum algorithms used in the
// middleware Metadata.
func SetOutputValidationAlgorithmsUsed(m *middleware.Metadata, vs []string) {
m.Set(outputValidationAlgorithmsUsedKey{}, vs)
}
// validateOutputPayloadChecksum middleware computes payload checksum of the
// received response and validates with checksum returned by the service.
type validateOutputPayloadChecksum struct {
// Algorithms represents a priority-ordered list of valid checksum
// algorithms that should be validated when present in HTTP response
// headers.
Algorithms []Algorithm
// IgnoreMultipartValidation indicates multipart checksums ending with "-#"
// will be ignored.
IgnoreMultipartValidation bool
// When set the middleware will log when the output does not have a checksum
// or algorithm to validate.
LogValidationSkipped bool
// When set the middleware will log when the output contains a multipart
// checksum that was skipped and not validated.
LogMultipartValidationSkipped bool
}
func (m *validateOutputPayloadChecksum) ID() string {
return "AWSChecksum:ValidateOutputPayloadChecksum"
}
// HandleDeserialize is a Deserialize middleware that wraps the HTTP response
// body with an io.ReadCloser that will validate its checksum.
func (m *validateOutputPayloadChecksum) HandleDeserialize(
ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
) (
out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
out, metadata, err = next.HandleDeserialize(ctx, in)
if err != nil {
return out, metadata, err
}
// If the validation mode is not enabled, skip validation.
if mode := getContextOutputValidationMode(ctx); mode != "ENABLED" {
return out, metadata, err
}
response, ok := out.RawResponse.(*smithyhttp.Response)
if !ok {
return out, metadata, &smithy.DeserializationError{
Err: fmt.Errorf("unknown transport type %T", out.RawResponse),
}
}
var expectedChecksum string
var algorithmToUse Algorithm
for _, algorithm := range m.Algorithms {
value := response.Header.Get(AlgorithmHTTPHeader(algorithm))
if len(value) == 0 {
continue
}
expectedChecksum = value
algorithmToUse = algorithm
}
// TODO this must validate the validation mode is set to enabled.
logger := middleware.GetLogger(ctx)
// Skip validation if no checksum algorithm or checksum is available.
if len(expectedChecksum) == 0 || len(algorithmToUse) == 0 {
if m.LogValidationSkipped {
// TODO this probably should have more information about the
// operation output that won't be validated.
logger.Logf(logging.Warn,
"Response has no supported checksum. Not validating response payload.")
}
return out, metadata, nil
}
// Ignore multipart validation
if m.IgnoreMultipartValidation && strings.Contains(expectedChecksum, "-") {
if m.LogMultipartValidationSkipped {
// TODO this probably should have more information about the
// operation output that won't be validated.
logger.Logf(logging.Warn, "Skipped validation of multipart checksum.")
}
return out, metadata, nil
}
body, err := newValidateChecksumReader(response.Body, algorithmToUse, expectedChecksum)
if err != nil {
return out, metadata, fmt.Errorf("failed to create checksum validation reader, %w", err)
}
response.Body = body
// Update the metadata to include the set of the checksum algorithms that
// will be validated.
SetOutputValidationAlgorithmsUsed(&metadata, []string{
string(algorithmToUse),
})
return out, metadata, nil
}
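// Minimal sketch of how a wrapped, checksum-validating response body behaves
// from the caller's side: the mismatch only surfaces once the reader is fully
// drained, so the payload must be read to EOF before the validation result is
// known. validatingBody is an illustrative stand-in, not the vendored
// newValidateChecksumReader implementation.
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"hash"
	"io"
)

// validatingBody hashes everything read and compares against the expected
// value once EOF is reached, mirroring the idea behind the middleware above.
type validatingBody struct {
	r        io.Reader
	h        hash.Hash
	expected string
}

func (v *validatingBody) Read(p []byte) (int, error) {
	n, err := v.r.Read(p)
	v.h.Write(p[:n])
	if err == io.EOF {
		got := base64.StdEncoding.EncodeToString(v.h.Sum(nil))
		if got != v.expected {
			return n, fmt.Errorf("checksum mismatch: got %s, want %s", got, v.expected)
		}
	}
	return n, err
}

func main() {
	payload := []byte("object payload")
	sum := sha256.Sum256(payload)
	expected := base64.StdEncoding.EncodeToString(sum[:])

	body := &validatingBody{r: bytes.NewReader(payload), h: sha256.New(), expected: expected}
	data, err := io.ReadAll(body) // reading to EOF triggers the comparison
	fmt.Println(len(data), err)   // 14 <nil>
}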


@ -1,3 +1,99 @@
# v1.13.17 (2022-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.16 (2022-09-14)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.15 (2022-09-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.14 (2022-08-31)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.13 (2022-08-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.12 (2022-08-11)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.11 (2022-08-09)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.10 (2022-08-08)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.9 (2022-08-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.8 (2022-07-05)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.7 (2022-06-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.6 (2022-06-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.5 (2022-05-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.4 (2022-04-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.3 (2022-03-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.2 (2022-03-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.1 (2022-03-23)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.13.0 (2022-03-08)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.12.0 (2022-02-24)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.11.0 (2022-01-14)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.10.0 (2022-01-07)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.2 (2021-12-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.1 (2021-11-19)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.9.0 (2021-11-06)
* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.


@ -21,9 +21,8 @@ func (a AccessPointARN) GetARN() arn.ARN {
// AccessPoint resource.
//
// Supported Access point resource format:
// - Access point format: arn:{partition}:s3:{region}:{accountId}:accesspoint/{accesspointName}
// - example: arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint
//
// - Access point format: arn:{partition}:s3:{region}:{accountId}:accesspoint/{accesspointName}
// - example: arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint
func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) {
if isFIPS(a.Region) {
return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "FIPS region not allowed in ARN"}


@ -17,15 +17,14 @@ type OutpostARN interface {
//
// Currently supported outpost ARN formats:
// * Outpost AccessPoint ARN format:
// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName}
// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint
// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName}
// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint
//
// * Outpost Bucket ARN format:
// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/bucket/{bucketName}
// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/bucket/mybucket
// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/bucket/{bucketName}
// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/bucket/mybucket
//
// Other outpost ARN formats may be supported and added in the future.
//
func ParseOutpostARNResource(a arn.ARN, resParts []string) (OutpostARN, error) {
if len(a.Region) == 0 {
return nil, InvalidARNError{ARN: a, Reason: "region not set"}
@ -113,7 +112,6 @@ func (o OutpostBucketARN) GetARN() arn.ARN {
// bucket resource id.
//
// parseBucketResource only parses the bucket resource id.
//
func parseBucketResource(a arn.ARN, resParts []string) (bucketName string, err error) {
if len(resParts) == 0 {
return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"}


@ -3,4 +3,4 @@
package s3shared
// goModuleVersion is the tagged release for this module
const goModuleVersion = "1.9.0"
const goModuleVersion = "1.13.17"


@ -1,3 +1,146 @@
# v1.27.11 (2022-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.27.10 (2022-09-14)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.27.9 (2022-09-02)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.27.8 (2022-08-31)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.27.7 (2022-08-30)
* No change notes available for this release.
# v1.27.6 (2022-08-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.27.5 (2022-08-11)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.27.4 (2022-08-09)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.27.3 (2022-08-08)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.27.2 (2022-08-01)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.27.1 (2022-07-05)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.27.0 (2022-07-01)
* **Feature**: Add presign support for HeadBucket, DeleteObject, and DeleteBucket. Fixes [#1076](https://github.com/aws/aws-sdk-go-v2/issues/1076).
# v1.26.12 (2022-06-29)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.26.11 (2022-06-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.26.10 (2022-05-17)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.26.9 (2022-05-06)
* No change notes available for this release.
# v1.26.8 (2022-05-03)
* **Documentation**: Documentation only update for doc bug fixes for the S3 API docs.
# v1.26.7 (2022-04-27)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.26.6 (2022-04-25)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.26.5 (2022-04-12)
* **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively.
# v1.26.4 (2022-04-07)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.26.3 (2022-03-30)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.26.2 (2022-03-24)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.26.1 (2022-03-23)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.26.0 (2022-03-08)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.25.0 (2022-02-24)
* **Feature**: API client updated
* **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adds a new retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttled responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details and configuration options.
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Bug Fix**: Fixes the AWS Sigv4 signer to trim header value's whitespace when computing the canonical headers block of the string to sign.
* **Dependency Update**: Updated to the latest SDK module versions
# v1.24.1 (2022-01-28)
* **Bug Fix**: Updates SDK API client deserialization to pre-allocate byte slice and string response payloads, [#1565](https://github.com/aws/aws-sdk-go-v2/pull/1565). Thanks to [Tyson Mote](https://github.com/tysonmote) for submitting this PR.
# v1.24.0 (2022-01-14)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Dependency Update**: Updated to the latest SDK module versions
# v1.23.0 (2022-01-07)
* **Feature**: Updated `github.com/aws/smithy-go` to latest version
* **Documentation**: API client updated
* **Dependency Update**: Updated to the latest SDK module versions
# v1.22.0 (2021-12-21)
* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens.
* **Feature**: Updated to latest service endpoints
# v1.21.0 (2021-12-02)
* **Feature**: API client updated
* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514))
* **Dependency Update**: Updated to the latest SDK module versions
# v1.20.0 (2021-11-30)
* **Feature**: API client updated
# v1.19.1 (2021-11-19)
* **Dependency Update**: Updated to the latest SDK module versions
# v1.19.0 (2021-11-12)
* **Feature**: Waiters now have a `WaitForOutput` method, which can be used to retrieve the output of the successful wait operation. Thank you to [Andrew Haines](https://github.com/haines) for contributing this feature.


@ -6,22 +6,25 @@ import (
"context"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/aws/defaults"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/retry"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
"github.com/aws/aws-sdk-go-v2/internal/v4a"
acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding"
internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url"
"github.com/aws/aws-sdk-go-v2/service/internal/s3shared"
s3sharedconfig "github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config"
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/aws-sdk-go-v2/service/s3/internal/v4a"
smithy "github.com/aws/smithy-go"
smithydocument "github.com/aws/smithy-go/document"
"github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
"net"
"net/http"
"time"
)
@ -43,6 +46,8 @@ func New(options Options, optFns ...func(*Options)) *Client {
resolveDefaultLogger(&options)
setResolvedDefaultsMode(&options)
resolveRetryer(&options)
resolveHTTPClient(&options)
@ -78,6 +83,10 @@ type Options struct {
// The credentials object to use when signing requests.
Credentials aws.CredentialsProvider
// The configuration DefaultsMode that the SDK should use when constructing the
// clients initial default settings.
DefaultsMode aws.DefaultsMode
// Allows you to disable S3 Multi-Region access points feature.
DisableMultiRegionAccessPoints bool
@ -96,10 +105,36 @@ type Options struct {
// The region to send requests to. (Required)
Region string
// RetryMaxAttempts specifies the maximum number of attempts an API client will
// make when calling an operation that fails with a retryable error. A value of 0
// is ignored, and will not be used to configure the API client's default retryer
// or modify a per-operation call's retry max attempts. When creating a new API
// client this member is only used if the Retryer Options member is nil. This
// value is ignored if Retryer is not nil. If specified in an operation call's
// functional options with a value that is different than the constructed
// client's Options, the client's Retryer will be wrapped to use the operation's
// specific RetryMaxAttempts value.
RetryMaxAttempts int
// RetryMode specifies the retry mode the API client will be created with, if
// the Retryer option is not also specified. When creating a new API client this
// member is only used if the Retryer Options member is nil. This value is
// ignored if Retryer is not nil. Per-operation call overrides are not currently
// supported, but may be in the future.
RetryMode aws.RetryMode
// Retryer guides how HTTP requests should be retried in case of recoverable
// failures. When nil the API client will use a default retryer.
// failures. When nil the API client will use a default retryer. The kind of
// default retry created by the API client can be changed with the RetryMode
// option.
Retryer aws.Retryer
// The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
// to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You
// should not populate this structure programmatically, or rely on the values here
// within your applications.
RuntimeEnvironment aws.RuntimeEnvironment
// Allows you to enable arn region support for the service.
UseARNRegion bool
@ -126,6 +161,12 @@ type Options struct {
// Signature Version 4a (SigV4a) Signer
httpSignerV4a httpSignerV4a
// The initial DefaultsMode used when the client options were constructed. If the
// DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
// value was at that point in time. Per-operation call overrides are not
// currently supported, but may be in the future.
resolvedDefaultsMode aws.DefaultsMode
// The HTTP client to invoke API calls with. Defaults to client's default HTTP
// implementation if nil.
HTTPClient HTTPClient
@ -156,6 +197,7 @@ func (o Options) Copy() Options {
to := o
to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions))
copy(to.APIOptions, o.APIOptions)
return to
}
func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) {
@ -168,6 +210,8 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf
setSafeEventStreamClientLogMode(&options, opID)
finalizeRetryMaxAttemptOptions(&options, *c)
finalizeClientEndpointResolverOptions(&options)
resolveCredentialProvider(&options)
@ -209,17 +253,36 @@ func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error {
return middleware.AddSetLoggerMiddleware(stack, o.Logger)
}
func setResolvedDefaultsMode(o *Options) {
if len(o.resolvedDefaultsMode) > 0 {
return
}
var mode aws.DefaultsMode
mode.SetFromString(string(o.DefaultsMode))
if mode == aws.DefaultsModeAuto {
mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment)
}
o.resolvedDefaultsMode = mode
}
// NewFromConfig returns a new client from the provided config.
func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
opts := Options{
Region: cfg.Region,
HTTPClient: cfg.HTTPClient,
Credentials: cfg.Credentials,
APIOptions: cfg.APIOptions,
Logger: cfg.Logger,
ClientLogMode: cfg.ClientLogMode,
Region: cfg.Region,
DefaultsMode: cfg.DefaultsMode,
RuntimeEnvironment: cfg.RuntimeEnvironment,
HTTPClient: cfg.HTTPClient,
Credentials: cfg.Credentials,
APIOptions: cfg.APIOptions,
Logger: cfg.Logger,
ClientLogMode: cfg.ClientLogMode,
}
resolveAWSRetryerProvider(cfg, &opts)
resolveAWSRetryMaxAttempts(cfg, &opts)
resolveAWSRetryMode(cfg, &opts)
resolveAWSEndpointResolver(cfg, &opts)
resolveUseARNRegion(cfg, &opts)
resolveUseDualStackEndpoint(cfg, &opts)
@ -228,17 +291,71 @@ func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
}
func resolveHTTPClient(o *Options) {
var buildable *awshttp.BuildableClient
if o.HTTPClient != nil {
return
var ok bool
buildable, ok = o.HTTPClient.(*awshttp.BuildableClient)
if !ok {
return
}
} else {
buildable = awshttp.NewBuildableClient()
}
o.HTTPClient = awshttp.NewBuildableClient()
modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode)
if err == nil {
buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) {
if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok {
dialer.Timeout = dialerTimeout
}
})
buildable = buildable.WithTransportOptions(func(transport *http.Transport) {
if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok {
transport.TLSHandshakeTimeout = tlsHandshakeTimeout
}
})
}
o.HTTPClient = buildable
}
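// Hedged sketch of the DefaultsMode plumbing above: selecting a defaults mode
// when loading the aws.Config so the resolved mode can drive the HTTP dialer,
// TLS handshake timeouts, and the retry mode. The region is a placeholder.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		config.WithRegion("us-east-1"),
		// DefaultsModeAuto is resolved from the runtime environment, as in
		// setResolvedDefaultsMode above.
		config.WithDefaultsMode(aws.DefaultsModeAuto),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = s3.NewFromConfig(cfg)
}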
func resolveRetryer(o *Options) {
if o.Retryer != nil {
return
}
o.Retryer = retry.NewStandard()
if len(o.RetryMode) == 0 {
modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode)
if err == nil {
o.RetryMode = modeConfig.RetryMode
}
}
if len(o.RetryMode) == 0 {
o.RetryMode = aws.RetryModeStandard
}
var standardOptions []func(*retry.StandardOptions)
if v := o.RetryMaxAttempts; v != 0 {
standardOptions = append(standardOptions, func(so *retry.StandardOptions) {
so.MaxAttempts = v
})
}
switch o.RetryMode {
case aws.RetryModeAdaptive:
var adaptiveOptions []func(*retry.AdaptiveModeOptions)
if len(standardOptions) != 0 {
adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) {
ao.StandardOptions = append(ao.StandardOptions, standardOptions...)
})
}
o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...)
default:
o.Retryer = retry.NewStandard(standardOptions...)
}
}
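// Hedged sketch of the retry configuration resolved above: RetryMode and
// RetryMaxAttempts can come from shared config or environment variables, or,
// as here, be set explicitly when loading the aws.Config that NewFromConfig
// consumes.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO(),
		// Adaptive mode wraps the standard retryer with client-side rate
		// limiting, per the changelog note above.
		config.WithRetryMode(aws.RetryModeAdaptive),
		config.WithRetryMaxAttempts(5),
	)
	if err != nil {
		log.Fatal(err)
	}

	// resolveAWSRetryMode/resolveAWSRetryMaxAttempts copy these values into
	// Options, and resolveRetryer builds the matching retryer.
	_ = s3.NewFromConfig(cfg)
}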
func resolveAWSRetryerProvider(cfg aws.Config, o *Options) {
@ -248,8 +365,29 @@ func resolveAWSRetryerProvider(cfg aws.Config, o *Options) {
o.Retryer = cfg.Retryer()
}
func resolveAWSRetryMode(cfg aws.Config, o *Options) {
if len(cfg.RetryMode) == 0 {
return
}
o.RetryMode = cfg.RetryMode
}
func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) {
if cfg.RetryMaxAttempts == 0 {
return
}
o.RetryMaxAttempts = cfg.RetryMaxAttempts
}
func finalizeRetryMaxAttemptOptions(o *Options, client Client) {
if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts {
return
}
o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
}
func resolveAWSEndpointResolver(cfg aws.Config, o *Options) {
if cfg.EndpointResolver == nil {
if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil {
return
}
o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver())
@ -344,6 +482,7 @@ func resolveCredentialProvider(o *Options) {
if o.Credentials == nil {
return
}
if _, ok := o.Credentials.(v4a.CredentialsProvider); ok {
return
}
@ -351,11 +490,11 @@ func resolveCredentialProvider(o *Options) {
switch o.Credentials.(type) {
case aws.AnonymousCredentials, *aws.AnonymousCredentials:
return
}
o.Credentials = &v4a.SymmetricCredentialAdaptor{SymmetricProvider: o.Credentials}
}
func swapWithCustomHTTPSignerMiddleware(stack *middleware.Stack, o Options) error {
mw := s3cust.NewSignHTTPRequestMiddleware(s3cust.SignHTTPRequestMiddlewareOptions{
CredentialsProvider: o.Credentials,
@ -363,11 +502,14 @@ func swapWithCustomHTTPSignerMiddleware(stack *middleware.Stack, o Options) erro
V4aSigner: o.httpSignerV4a,
LogSigning: o.ClientLogMode.IsSigning(),
})
return s3cust.RegisterSigningMiddleware(stack, mw)
}
type httpSignerV4a interface {
SignHTTP(ctx context.Context, credentials v4a.Credentials, r *http.Request, payloadHash string, service string, regionSet []string, signingTime time.Time, optFns ...func(*v4a.SignerOptions)) error
SignHTTP(ctx context.Context, credentials v4a.Credentials, r *http.Request, payloadHash,
service string, regionSet []string, signingTime time.Time,
optFns ...func(*v4a.SignerOptions)) error
}
func resolveHTTPSignerV4a(o *Options) {
@ -389,6 +531,53 @@ func addMetadataRetrieverMiddleware(stack *middleware.Stack) error {
return s3shared.AddMetadataRetrieverMiddleware(stack)
}
// ComputedInputChecksumsMetadata provides information about the algorithms used to
// compute the checksum(s) of the input payload.
type ComputedInputChecksumsMetadata struct {
// ComputedChecksums is a map of algorithm name to checksum value of the computed
// input payload's checksums.
ComputedChecksums map[string]string
}
// GetComputedInputChecksumsMetadata retrieves from the result metadata the map of
// algorithms and input payload checksums values.
func GetComputedInputChecksumsMetadata(m middleware.Metadata) (ComputedInputChecksumsMetadata, bool) {
values, ok := internalChecksum.GetComputedInputChecksums(m)
if !ok {
return ComputedInputChecksumsMetadata{}, false
}
return ComputedInputChecksumsMetadata{
ComputedChecksums: values,
}, true
}
// ChecksumValidationMetadata contains metadata such as the checksum algorithm used
// for data integrity validation.
type ChecksumValidationMetadata struct {
// AlgorithmsUsed is the set of the checksum algorithms used to validate the
// response payload. The response payload must be completely read in order for the
// checksum validation to be performed. An error is returned by the operation
// output's response io.ReadCloser if the computed checksums are invalid.
AlgorithmsUsed []string
}
// GetChecksumValidationMetadata returns the set of algorithms that will be used to
// validate the response payload with. The response payload must be completely read
// in order for the checksum validation to be performed. An error is returned by
// the operation output's response io.ReadCloser if the computed checksums are
// invalid. Returns false if no checksum algorithm used metadata was found.
func GetChecksumValidationMetadata(m middleware.Metadata) (ChecksumValidationMetadata, bool) {
values, ok := internalChecksum.GetOutputValidationAlgorithmsUsed(m)
if !ok {
return ChecksumValidationMetadata{}, false
}
return ChecksumValidationMetadata{
AlgorithmsUsed: append(make([]string, 0, len(values)), values...),
}, true
}
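// Hedged sketch of reading the checksum metadata helpers defined above from an
// operation result; the bucket and key are placeholders, and error handling is
// trimmed to keep the example short.
package main

import (
	"context"
	"fmt"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	out, err := client.PutObject(context.TODO(), &s3.PutObjectInput{
		Bucket:            aws.String("example-bucket"),
		Key:               aws.String("example-key"),
		Body:              strings.NewReader("hello"),
		ChecksumAlgorithm: types.ChecksumAlgorithmSha256,
	})
	if err != nil {
		log.Fatal(err)
	}

	if md, ok := s3.GetComputedInputChecksumsMetadata(out.ResultMetadata); ok {
		fmt.Println("checksums sent with the request:", md.ComputedChecksums)
	}
}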
// nopGetBucketAccessor is a no-op accessor for operations that don't support a
// bucket member as input
func nopGetBucketAccessor(input interface{}) (*string, bool) {
@ -541,6 +730,9 @@ func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, op
if err != nil {
return err
}
if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil {
return err
}
// add multi-region access point presigner
signermv := s3cust.NewPresignHTTPRequestMiddleware(s3cust.PresignHTTPRequestMiddlewareOptions{


@ -74,9 +74,9 @@ type AbortMultipartUploadInput struct {
// you must direct requests to the S3 on Outposts hostname. The S3 on Outposts
// hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
// this action using S3 on Outposts through the Amazon Web Services SDKs, you
// this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts bucket ARN in place of the bucket name. For more
// information about S3 on Outposts ARNs, see Using S3 on Outposts
// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the
// Amazon S3 User Guide.
//
@ -94,13 +94,14 @@ type AbortMultipartUploadInput struct {
UploadId *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from requester pays buckets, see Downloading Objects
// in Requestor Pays Buckets
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer


@ -31,10 +31,13 @@ import (
// determine whether the request succeeded. Note that if CompleteMultipartUpload
// fails, applications should be prepared to retry the failed requests. For more
// information, see Amazon S3 Error Best Practices
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). For
// more information about multipart uploads, see Uploading Objects Using Multipart
// Upload (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html).
// For information about permissions required to use the multipart upload API, see
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html). You
// cannot use Content-Type: application/x-www-form-urlencoded with Complete
// Multipart Upload requests. Also, if you do not provide a Content-Type header,
// CompleteMultipartUpload returns a 200 OK response. For more information about
// multipart uploads, see Uploading Objects Using Multipart Upload
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html). For
// information about permissions required to use the multipart upload API, see
// Multipart Upload and Permissions
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html).
// CompleteMultipartUpload has the following special errors:
@ -124,9 +127,9 @@ type CompleteMultipartUploadInput struct {
// you must direct requests to the S3 on Outposts hostname. The S3 on Outposts
// hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
// this action using S3 on Outposts through the Amazon Web Services SDKs, you
// this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts bucket ARN in place of the bucket name. For more
// information about S3 on Outposts ARNs, see Using S3 on Outposts
// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the
// Amazon S3 User Guide.
//
@ -143,8 +146,41 @@ type CompleteMultipartUploadInput struct {
// This member is required.
UploadId *string
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies the
// base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
// Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumCRC32 *string
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies the
// base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
// Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumCRC32C *string
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies the
// base64-encoded, 160-bit SHA-1 digest of the object. For more information, see
// Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumSHA1 *string
// This header can be used as a data integrity check to verify that the data
// received is the same data that was originally sent. This header specifies the
// base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
// Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumSHA256 *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
// The container for the multipart upload request information.
@ -152,12 +188,33 @@ type CompleteMultipartUploadInput struct {
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from requester pays buckets, see Downloading Objects
// in Requestor Pays Buckets
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer
// The server-side encryption (SSE) algorithm used to encrypt the object. This
// parameter is needed only when the object was created using a checksum algorithm.
// For more information, see Protecting data using SSE-C keys
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
// in the Amazon S3 User Guide.
SSECustomerAlgorithm *string
// The server-side encryption (SSE) customer managed key. This parameter is needed
// only when the object was created using a checksum algorithm. For more
// information, see Protecting data using SSE-C keys
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
// in the Amazon S3 User Guide.
SSECustomerKey *string
// The MD5 server-side encryption (SSE) customer managed key. This parameter is
// needed only when the object was created using a checksum algorithm. For more
// information, see Protecting data using SSE-C keys
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
// in the Amazon S3 User Guide.
SSECustomerKeyMD5 *string
noSmithyDocumentSerde
}
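// Hedged sketch of supplying the new checksum members on
// CompleteMultipartUploadInput described above; every value below is a
// placeholder the caller would normally carry over from CreateMultipartUpload
// and UploadPart results.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func completeInput(bucket, key, uploadID, objectChecksumSHA256 string, parts []types.CompletedPart) *s3.CompleteMultipartUploadInput {
	return &s3.CompleteMultipartUploadInput{
		Bucket:   aws.String(bucket),
		Key:      aws.String(key),
		UploadId: aws.String(uploadID),
		MultipartUpload: &types.CompletedMultipartUpload{
			// Each part keeps its ETag and, when checksums are in use, the
			// matching per-part checksum returned by UploadPart.
			Parts: parts,
		},
		// Base64-encoded digest for the completed object, as described above.
		ChecksumSHA256: aws.String(objectChecksumSHA256),
	}
}

func main() {
	parts := []types.CompletedPart{{
		ETag:           aws.String("etag-placeholder"),
		PartNumber:     1,
		ChecksumSHA256: aws.String("part-checksum-placeholder"),
	}}
	in := completeInput("example-bucket", "example-key", "upload-id", "object-checksum-placeholder", parts)
	fmt.Println(*in.Key)
}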
@ -176,9 +233,9 @@ type CompleteMultipartUploadOutput struct {
// you must direct requests to the S3 on Outposts hostname. The S3 on Outposts
// hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
// this action using S3 on Outposts through the Amazon Web Services SDKs, you
// this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts bucket ARN in place of the bucket name. For more
// information about S3 on Outposts ARNs, see Using S3 on Outposts
// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the
// Amazon S3 User Guide.
Bucket *string
@ -187,16 +244,51 @@ type CompleteMultipartUploadOutput struct {
// encryption with Amazon Web Services KMS (SSE-KMS).
BucketKeyEnabled bool
// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
// present if it was uploaded with the object. With multipart uploads, this may not
// be a checksum value of the object. For more information about how checksums are
// calculated with multipart uploads, see Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32 *string
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
// present if it was uploaded with the object. With multipart uploads, this may not
// be a checksum value of the object. For more information about how checksums are
// calculated with multipart uploads, see Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32C *string
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
// present if it was uploaded with the object. With multipart uploads, this may not
// be a checksum value of the object. For more information about how checksums are
// calculated with multipart uploads, see Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA1 *string
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
// present if it was uploaded with the object. With multipart uploads, this may not
// be a checksum value of the object. For more information about how checksums are
// calculated with multipart uploads, see Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA256 *string
// Entity tag that identifies the newly created object's data. Objects with
// different object data will have different entity tags. The entity tag is an
// opaque string. The entity tag may or may not be an MD5 digest of the object
// data. If the entity tag is not an MD5 digest of the object data, it will contain
// one or more nonhexadecimal characters and/or will consist of less than 32 or
// more than 32 hexadecimal digits.
// more than 32 hexadecimal digits. For more information about how the entity tag
// is calculated, see Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ETag *string
// If the object expiration is configured, this will contain the expiration date
// (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
// (expiry-date) and rule ID (rule-id). The value of rule-id is URL-encoded.
Expiration *string
// The object key of the newly created object.


@ -17,7 +17,8 @@ import (
// individual objects of up to 5 TB in Amazon S3. You create a copy of your object
// up to 5 GB in size in a single atomic action using this API. However, to copy an
// object greater than 5 GB, you must use the multipart upload Upload Part - Copy
// API. For more information, see Copy Object Using the REST Multipart Upload API
// (UploadPartCopy) API. For more information, see Copy Object Using the REST
// Multipart Upload API
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html).
// All copy requests must be authenticated. Additionally, you must have read access
// to the source object and write access to the destination bucket. For more
@ -113,10 +114,24 @@ import (
// List (ACL) Overview
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html) and Managing
// ACLs Using the REST API
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html).
// Storage Class Options You can use the CopyObject action to change the storage
// class of an object that is already stored in Amazon S3 using the StorageClass
// parameter. For more information, see Storage Classes
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html). If
// the bucket that you're copying objects to uses the bucket owner enforced setting
// for S3 Object Ownership, ACLs are disabled and no longer affect permissions.
// Buckets that use this setting only accept PUT requests that don't specify an ACL
// or PUT requests that specify bucket owner full control ACLs, such as the
// bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed
// in the XML format. For more information, see Controlling ownership of objects
// and disabling ACLs
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
// in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced
// setting for Object Ownership, all objects written to the bucket by any account
// will be owned by the bucket owner. Checksums When copying an object, if it has a
// checksum, that checksum will be copied to the new object by default. When you
// copy the object over, you may optionally specify a different checksum algorithm
// to use with the x-amz-checksum-algorithm header. Storage Class Options You can
// use the CopyObject action to change the storage class of an object that is
// already stored in Amazon S3 using the StorageClass parameter. For more
// information, see Storage Classes
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) in
// the Amazon S3 User Guide. Versioning By default, x-amz-copy-source identifies
// the current version of an object to copy. If the current version is a delete
@ -171,9 +186,9 @@ type CopyObjectInput struct {
// you must direct requests to the S3 on Outposts hostname. The S3 on Outposts
// hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
// this action using S3 on Outposts through the Amazon Web Services SDKs, you
// this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts bucket ARN in place of the bucket name. For more
// information about S3 on Outposts ARNs, see Using S3 on Outposts
// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the
// Amazon S3 User Guide.
//
@ -189,7 +204,7 @@ type CopyObjectInput struct {
// For objects not accessed through an access point, specify the name of the source
// bucket and the key of the source object, separated by a slash (/). For example,
// to copy the object reports/january.pdf from the bucket awsexamplebucket, use
// awsexamplebucket/reports/january.pdf. The value must be URL encoded.
// awsexamplebucket/reports/january.pdf. The value must be URL-encoded.
//
// * For
// objects accessed through access points, specify the Amazon Resource Name (ARN)
@ -206,7 +221,7 @@ type CopyObjectInput struct {
// reports/january.pdf through outpost my-outpost owned by account 123456789012 in
// Region us-west-2, use the URL encoding of
// arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf.
// The value must be URL encoded.
// The value must be URL-encoded.
//
// To copy a specific version of an object, append
// ?versionId= to the value (for example,
@ -236,6 +251,12 @@ type CopyObjectInput struct {
// Specifies caching behavior along the request/reply chain.
CacheControl *string
// Indicates the algorithm you want Amazon S3 to use to create the checksum for the
// object. For more information, see Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumAlgorithm types.ChecksumAlgorithm
// Specifies presentational information for the object.
ContentDisposition *string
@ -277,13 +298,13 @@ type CopyObjectInput struct {
CopySourceSSECustomerKeyMD5 *string
// The account ID of the expected destination bucket owner. If the destination
// bucket is owned by a different account, the request will fail with an HTTP 403
// (Access Denied) error.
// bucket is owned by a different account, the request fails with the HTTP status
// code 403 Forbidden (access denied).
ExpectedBucketOwner *string
// The account ID of the expected source bucket owner. If the source bucket is
// owned by a different account, the request will fail with an HTTP 403 (Access
// Denied) error.
// owned by a different account, the request fails with the HTTP status code 403
// Forbidden (access denied).
ExpectedSourceBucketOwner *string
// The date and time at which the object is no longer cacheable.
@ -312,7 +333,7 @@ type CopyObjectInput struct {
// metadata provided in the request.
MetadataDirective types.MetadataDirective
// Specifies whether you want to apply a Legal Hold to the copied object.
// Specifies whether you want to apply a legal hold to the copied object.
ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
// The Object Lock mode that you want to apply to the copied object.
@ -323,8 +344,8 @@ type CopyObjectInput struct {
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from requester pays buckets, see Downloading Objects
// in Requestor Pays Buckets
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer
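// Hedged sketch of the CopyObject checksum behaviour described above: asking
// S3 to compute the destination object's checksum with a different algorithm
// via ChecksumAlgorithm. Bucket and key names are placeholders.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	_, err = client.CopyObject(context.TODO(), &s3.CopyObjectInput{
		Bucket:     aws.String("destination-bucket"),
		Key:        aws.String("reports/january.pdf"),
		CopySource: aws.String("source-bucket/reports/january.pdf"), // must be URL-encoded
		// Maps to the x-amz-checksum-algorithm header mentioned above.
		ChecksumAlgorithm: types.ChecksumAlgorithmCrc32c,
	})
	if err != nil {
		log.Fatal(err)
	}
}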


@ -34,15 +34,23 @@ import (
// created. If you create a bucket in a Region other than US East (N. Virginia),
// your application must be able to handle 307 redirect. For more information, see
// Virtual hosting of buckets
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). When
// creating a bucket using this operation, you can optionally specify the accounts
// or groups that should be granted specific permissions on the bucket. There are
// two ways to grant the appropriate permissions using the request headers.
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html). Access
// control lists (ACLs) When creating a bucket using this operation, you can
// optionally configure the bucket ACL to specify the accounts or groups that
// should be granted specific permissions on the bucket. If your CreateBucket
// request sets bucket owner enforced for S3 Object Ownership and specifies a
// bucket ACL that provides access to an external Amazon Web Services account, your
// request fails with a 400 error and returns the
// InvalidBucketAclWithObjectOwnership error code. For more information, see
// Controlling object ownership
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
// in the Amazon S3 User Guide. There are two ways to grant the appropriate
// permissions using the request headers.
//
// *
// Specify a canned ACL using the x-amz-acl request header. Amazon S3 supports a
// set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined
// set of grantees and permissions. For more information, see Canned ACL
// * Specify a canned ACL using the
// x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as
// canned ACLs. Each canned ACL has a predefined set of grantees and permissions.
// For more information, see Canned ACL
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
//
// *
@ -51,41 +59,41 @@ import (
// x-amz-grant-full-control headers. These headers map to the set of permissions
// Amazon S3 supports in an ACL. For more information, see Access control list
// (ACL) overview
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html). You specify
// each grantee as a type=value pair, where the type is one of the following:
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html). You
// specify each grantee as a type=value pair, where the type is one of the
// following:
//
// * id
// if the value specified is the canonical user ID of an Amazon Web Services
// account
// * id if the value specified is the canonical user ID of an Amazon
// Web Services account
//
// * uri if you are granting permissions to a predefined group
// * uri if you are granting permissions to a predefined
// group
//
// *
// emailAddress if the value specified is the email address of an Amazon Web
// Services account Using email addresses to specify a grantee is only supported in
// the following Amazon Web Services Regions:
// * emailAddress if the value specified is the email address of an Amazon
// Web Services account Using email addresses to specify a grantee is only
// supported in the following Amazon Web Services Regions:
//
// * US East (N. Virginia)
// * US East (N.
// Virginia)
//
// * US West
// (N. California)
// * US West (N. California)
//
// * US West (Oregon)
//
// * Asia Pacific (Singapore)
//
// * Asia Pacific
// (Sydney)
// (Singapore)
//
// * Asia Pacific (Sydney)
//
// * Asia Pacific (Tokyo)
//
// * Europe (Ireland)
// * Europe
// (Ireland)
//
// * South America (São
// Paulo)
// * South America (São Paulo)
//
// For a list of all the Amazon S3 supported Regions and endpoints, see
// Regions and Endpoints
// For a list of all the Amazon S3
// supported Regions and endpoints, see Regions and Endpoints
// (https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region) in the
// Amazon Web Services General Reference.
//
@ -95,15 +103,27 @@ import (
// id="11112222333", id="444455556666"
//
// You can use either a canned ACL or specify
// access permissions explicitly. You cannot do both. Permissions If your
// CreateBucket request specifies ACL permissions and the ACL is public-read,
// public-read-write, authenticated-read, or if you specify access permissions
// explicitly through any other ACL, both s3:CreateBucket and s3:PutBucketAcl
// permissions are needed. If the ACL the CreateBucket request is private, only
// s3:CreateBucket permission is needed. If ObjectLockEnabledForBucket is set to
// true in your CreateBucket request, s3:PutBucketObjectLockConfiguration and
// s3:PutBucketVersioning permissions are required. The following operations are
// related to CreateBucket:
// access permissions explicitly. You cannot do both. Permissions In addition to
// s3:CreateBucket, the following permissions are required when your CreateBucket
// includes specific headers:
//
// * ACLs - If your CreateBucket request specifies ACL
// permissions and the ACL is public-read, public-read-write, authenticated-read,
// or if you specify access permissions explicitly through any other ACL, both
// s3:CreateBucket and s3:PutBucketAcl permissions are needed. If the ACL in the
// CreateBucket request is private or doesn't specify any ACLs, only
// s3:CreateBucket permission is needed.
//
// * Object Lock - If
// ObjectLockEnabledForBucket is set to true in your CreateBucket request,
// s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are
// required.
//
// * S3 Object Ownership - If your CreateBucket request includes the
// x-amz-object-ownership header, s3:PutBucketOwnershipControls permission is
// required.
//
// The following operations are related to CreateBucket:
//
// * PutObject
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html)
@ -160,14 +180,25 @@ type CreateBucketInput struct {
// Specifies whether you want S3 Object Lock to be enabled for the new bucket.
ObjectLockEnabledForBucket bool
// The container element for object ownership for a bucket's ownership controls.
// BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the
// bucket owner if the objects are uploaded with the bucket-owner-full-control
// canned ACL. ObjectWriter - The uploading account will own the object if the
// object is uploaded with the bucket-owner-full-control canned ACL.
// BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer
// affect permissions. The bucket owner automatically owns and has full control
// over every object in the bucket. The bucket only accepts PUT requests that don't
// specify an ACL or bucket owner full control ACLs, such as the
// bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed
// in the XML format.
ObjectOwnership types.ObjectOwnership
noSmithyDocumentSerde
}
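// Hedged sketch of the ObjectOwnership setting described above: creating a
// bucket with ACLs disabled (BucketOwnerEnforced). The bucket name is a
// placeholder, and outside us-east-1 a CreateBucketConfiguration with a
// LocationConstraint would also be required.
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	cfg, err := config.LoadDefaultConfig(context.TODO())
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	_, err = client.CreateBucket(context.TODO(), &s3.CreateBucketInput{
		Bucket:          aws.String("example-bucket"),
		ObjectOwnership: types.ObjectOwnershipBucketOwnerEnforced,
	})
	if err != nil {
		log.Fatal(err)
	}
}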
type CreateBucketOutput struct {
// Specifies the Region where the bucket will be created. If you are creating a
// bucket on the US East (N. Virginia) Region (us-east-1), you do not need to
// specify the location.
// A forward slash followed by the name of the bucket.
Location *string
// Metadata pertaining to the operation's result.


@ -262,9 +262,9 @@ type CreateMultipartUploadInput struct {
// you must direct requests to the S3 on Outposts hostname. The S3 on Outposts
// hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
// this action using S3 on Outposts through the Amazon Web Services SDKs, you
// this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts bucket ARN in place of the bucket name. For more
// information about S3 on Outposts ARNs, see Using S3 on Outposts
// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the
// Amazon S3 User Guide.
//
@ -290,6 +290,12 @@ type CreateMultipartUploadInput struct {
// Specifies caching behavior along the request/reply chain.
CacheControl *string
// Indicates the algorithm you want Amazon S3 to use to create the checksum for the
// object. For more information, see Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide.
ChecksumAlgorithm types.ChecksumAlgorithm
// Specifies presentational information for the object.
ContentDisposition *string
@ -305,7 +311,8 @@ type CreateMultipartUploadInput struct {
ContentType *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
// The date and time at which the object is no longer cacheable.
@ -330,7 +337,7 @@ type CreateMultipartUploadInput struct {
// A map of metadata to store with the object in S3.
Metadata map[string]string
// Specifies whether you want to apply a Legal Hold to the uploaded object.
// Specifies whether you want to apply a legal hold to the uploaded object.
ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
// Specifies the Object Lock mode that you want to apply to the uploaded object.
@ -341,8 +348,8 @@ type CreateMultipartUploadInput struct {
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from requester pays buckets, see Downloading Objects
// in Requestor Pays Buckets
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer
@ -433,9 +440,9 @@ type CreateMultipartUploadOutput struct {
// you must direct requests to the S3 on Outposts hostname. The S3 on Outposts
// hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
// this action using S3 on Outposts through the Amazon Web Services SDKs, you
// this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts bucket ARN in place of the bucket name. For more
// information about S3 on Outposts ARNs, see Using S3 on Outposts
// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the
// Amazon S3 User Guide.
Bucket *string
@ -444,6 +451,9 @@ type CreateMultipartUploadOutput struct {
// encryption with Amazon Web Services KMS (SSE-KMS).
BucketKeyEnabled bool
// The algorithm that was used to create a checksum of the object.
ChecksumAlgorithm types.ChecksumAlgorithm
// Object key for which the multipart upload was initiated.
Key *string

View file

@ -44,7 +44,8 @@ type DeleteBucketInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -166,3 +167,34 @@ func addDeleteBucketUpdateEndpoint(stack *middleware.Stack, options Options) err
DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
})
}
// PresignDeleteBucket is used to generate a presigned HTTP request that contains
// the presigned URL, the signed headers, and the HTTP method to use.
func (c *PresignClient) PresignDeleteBucket(ctx context.Context, params *DeleteBucketInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {
if params == nil {
params = &DeleteBucketInput{}
}
options := c.options.copy()
for _, fn := range optFns {
fn(&options)
}
clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)
result, _, err := c.client.invokeOperation(ctx, "DeleteBucket", params, clientOptFns,
c.client.addOperationDeleteBucketMiddlewares,
presignConverter(options).convertToPresignMiddleware,
addDeleteBucketPayloadAsUnsigned,
)
if err != nil {
return nil, err
}
out := result.(*v4.PresignedHTTPRequest)
return out, nil
}
func addDeleteBucketPayloadAsUnsigned(stack *middleware.Stack, options Options) error {
v4.RemoveContentSHA256HeaderMiddleware(stack)
v4.RemoveComputePayloadSHA256Middleware(stack)
return v4.AddUnsignedPayloadMiddleware(stack)
}
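
A minimal usage sketch for the newly vendored presign helper, assuming an already-configured *s3.Client (for example created via config.LoadDefaultConfig and s3.NewFromConfig); the bucket name is a hypothetical placeholder and this snippet is editorial illustration, not part of the vendored code:

package s3usage

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// presignBucketDeletion builds a presigned DeleteBucket request for a
// placeholder bucket and returns its URL; a caller without AWS credentials
// can later issue the DELETE itself.
func presignBucketDeletion(ctx context.Context, client *s3.Client) (string, error) {
	presigner := s3.NewPresignClient(client)
	req, err := presigner.PresignDeleteBucket(ctx, &s3.DeleteBucketInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket name
	})
	if err != nil {
		return "", err
	}
	fmt.Println("method:", req.Method) // expected to be DELETE
	return req.URL, nil
}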

View file

@ -64,7 +64,8 @@ type DeleteBucketAnalyticsConfigurationInput struct {
Id *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -47,7 +47,8 @@ type DeleteBucketCorsInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -55,7 +55,8 @@ type DeleteBucketEncryptionInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -15,16 +15,16 @@ import (
// S3 Intelligent-Tiering storage class is designed to optimize storage costs by
// automatically moving data to the most cost-effective storage access tier,
// without performance impact or operational overhead. S3 Intelligent-Tiering
// delivers automatic cost savings in two low latency and high throughput access
// tiers. For data that can be accessed asynchronously, you can choose to activate
// automatic archiving capabilities within the S3 Intelligent-Tiering storage
// class. The S3 Intelligent-Tiering storage class is the ideal storage class for
// data with unknown, changing, or unpredictable access patterns, independent of
// object size or retention period. If the size of an object is less than 128 KB,
// it is not eligible for auto-tiering. Smaller objects can be stored, but they are
// always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering
// storage class. For more information, see Storage class for automatically
// optimizing frequently and infrequently accessed objects
// delivers automatic cost savings in three low latency and high throughput access
// tiers. To get the lowest storage cost on data that can be accessed in minutes to
// hours, you can choose to activate additional archiving capabilities. The S3
// Intelligent-Tiering storage class is the ideal storage class for data with
// unknown, changing, or unpredictable access patterns, independent of object size
// or retention period. If the size of an object is less than 128 KB, it is not
// monitored and not eligible for auto-tiering. Smaller objects can be stored, but
// they are always charged at the Frequent Access tier rates in the S3
// Intelligent-Tiering storage class. For more information, see Storage class for
// automatically optimizing frequently and infrequently accessed objects
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access).
// Operations related to DeleteBucketIntelligentTieringConfiguration include:
//

View file

@ -63,7 +63,8 @@ type DeleteBucketInventoryConfigurationInput struct {
Id *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -53,7 +53,8 @@ type DeleteBucketLifecycleInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -69,7 +69,8 @@ type DeleteBucketMetricsConfigurationInput struct {
Id *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -46,7 +46,8 @@ type DeleteBucketOwnershipControlsInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -56,7 +56,8 @@ type DeleteBucketPolicyInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -54,7 +54,8 @@ type DeleteBucketReplicationInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -45,7 +45,8 @@ type DeleteBucketTaggingInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -54,7 +54,8 @@ type DeleteBucketWebsiteInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -65,9 +65,9 @@ type DeleteObjectInput struct {
// you must direct requests to the S3 on Outposts hostname. The S3 on Outposts
// hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
// this action using S3 on Outposts through the Amazon Web Services SDKs, you
// this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts bucket ARN in place of the bucket name. For more
// information about S3 on Outposts ARNs, see Using S3 on Outposts
// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the
// Amazon S3 User Guide.
//
@ -81,11 +81,12 @@ type DeleteObjectInput struct {
// Indicates whether S3 Object Lock should bypass Governance-mode restrictions to
// process this operation. To use this header, you must have the
// s3:PutBucketPublicAccessBlock permission.
// s3:BypassGovernanceRetention permission.
BypassGovernanceRetention bool
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
// The concatenation of the authentication device's serial number, a space, and the
@ -95,8 +96,8 @@ type DeleteObjectInput struct {
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from requester pays buckets, see Downloading Objects
// in Requestor Pays Buckets
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer
@ -236,3 +237,34 @@ func addDeleteObjectUpdateEndpoint(stack *middleware.Stack, options Options) err
DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
})
}
// PresignDeleteObject is used to generate a presigned HTTP request that contains
// the presigned URL, the signed headers, and the HTTP method to use.
func (c *PresignClient) PresignDeleteObject(ctx context.Context, params *DeleteObjectInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {
if params == nil {
params = &DeleteObjectInput{}
}
options := c.options.copy()
for _, fn := range optFns {
fn(&options)
}
clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)
result, _, err := c.client.invokeOperation(ctx, "DeleteObject", params, clientOptFns,
c.client.addOperationDeleteObjectMiddlewares,
presignConverter(options).convertToPresignMiddleware,
addDeleteObjectPayloadAsUnsigned,
)
if err != nil {
return nil, err
}
out := result.(*v4.PresignedHTTPRequest)
return out, nil
}
func addDeleteObjectPayloadAsUnsigned(stack *middleware.Stack, options Options) error {
v4.RemoveContentSHA256HeaderMiddleware(stack)
v4.RemoveComputePayloadSHA256Middleware(stack)
return v4.AddUnsignedPayloadMiddleware(stack)
}
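
A companion sketch showing how a presigned DeleteObject request might be executed with a plain net/http client; it assumes a configured *s3.Client, and the bucket and key names are hypothetical placeholders:

package s3usage

import (
	"context"
	"fmt"
	"net/http"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// deleteViaPresignedRequest presigns a DeleteObject call and then performs it
// with net/http, copying the signed headers onto the outgoing request.
func deleteViaPresignedRequest(ctx context.Context, client *s3.Client) error {
	presigner := s3.NewPresignClient(client)
	presigned, err := presigner.PresignDeleteObject(ctx, &s3.DeleteObjectInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket name
		Key:    aws.String("example-key"),    // hypothetical object key
	})
	if err != nil {
		return err
	}
	httpReq, err := http.NewRequestWithContext(ctx, presigned.Method, presigned.URL, nil)
	if err != nil {
		return err
	}
	httpReq.Header = presigned.SignedHeader.Clone()
	resp, err := http.DefaultClient.Do(httpReq)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return nil
}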

View file

@ -55,9 +55,9 @@ type DeleteObjectTaggingInput struct {
// you must direct requests to the S3 on Outposts hostname. The S3 on Outposts
// hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
// this action using S3 on Outposts through the Amazon Web Services SDKs, you
// this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts bucket ARN in place of the bucket name. For more
// information about S3 on Outposts ARNs, see Using S3 on Outposts
// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the
// Amazon S3 User Guide.
//
@ -70,7 +70,8 @@ type DeleteObjectTaggingInput struct {
Key *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
// The versionId of the object that the tag-set will be removed from.

View file

@ -6,6 +6,7 @@ import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go/middleware"
@ -86,9 +87,9 @@ type DeleteObjectsInput struct {
// you must direct requests to the S3 on Outposts hostname. The S3 on Outposts
// hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
// this action using S3 on Outposts through the Amazon Web Services SDKs, you
// this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts bucket ARN in place of the bucket name. For more
// information about S3 on Outposts ARNs, see Using S3 on Outposts
// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the
// Amazon S3 User Guide.
//
@ -102,11 +103,25 @@ type DeleteObjectsInput struct {
// Specifies whether you want to delete this object even if it has a
// Governance-type Object Lock in place. To use this header, you must have the
// s3:PutBucketPublicAccessBlock permission.
// s3:BypassGovernanceRetention permission.
BypassGovernanceRetention bool
// Indicates the algorithm used to create the checksum for the object when using
// the SDK. This header will not provide any additional functionality if not using
// the SDK. When sending this header, there must be a corresponding x-amz-checksum
// or x-amz-trailer header sent. Otherwise, Amazon S3 fails the request with the
// HTTP status code 400 Bad Request. For more information, see Checking object
// integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html)
// in the Amazon S3 User Guide. If you provide an individual checksum, Amazon S3
// ignores any provided ChecksumAlgorithm parameter. This checksum algorithm must
// be the same for all parts and it must match the checksum value supplied in the
// CreateMultipartUpload request.
ChecksumAlgorithm types.ChecksumAlgorithm
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
// The concatenation of the authentication device's serial number, a space, and the
@ -116,8 +131,8 @@ type DeleteObjectsInput struct {
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from requester pays buckets, see Downloading Objects
// in Requestor Pays Buckets
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer
@ -193,9 +208,6 @@ func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o
if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
return err
}
if err = smithyhttp.AddContentChecksumMiddleware(stack); err != nil {
return err
}
if err = addOpDeleteObjectsValidationMiddleware(stack); err != nil {
return err
}
@ -205,6 +217,9 @@ func (c *Client) addOperationDeleteObjectsMiddlewares(stack *middleware.Stack, o
if err = addMetadataRetrieverMiddleware(stack); err != nil {
return err
}
if err = addDeleteObjectsInputChecksumMiddlewares(stack, options); err != nil {
return err
}
if err = addDeleteObjectsUpdateEndpoint(stack, options); err != nil {
return err
}
@ -232,6 +247,26 @@ func newServiceMetadataMiddleware_opDeleteObjects(region string) *awsmiddleware.
}
}
// getDeleteObjectsRequestAlgorithmMember gets the request checksum algorithm value
// provided as input.
func getDeleteObjectsRequestAlgorithmMember(input interface{}) (string, bool) {
in := input.(*DeleteObjectsInput)
if len(in.ChecksumAlgorithm) == 0 {
return "", false
}
return string(in.ChecksumAlgorithm), true
}
func addDeleteObjectsInputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
return internalChecksum.AddInputMiddleware(stack, internalChecksum.InputMiddlewareOptions{
GetAlgorithm: getDeleteObjectsRequestAlgorithmMember,
RequireChecksum: true,
EnableTrailingChecksum: false,
EnableComputeSHA256PayloadHash: true,
EnableDecodedContentLengthHeader: true,
})
}
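
A short sketch of how the new ChecksumAlgorithm input field wired up by addDeleteObjectsInputChecksumMiddlewares might be used; it assumes a configured *s3.Client, and the bucket and object keys are hypothetical placeholders:

package s3usage

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// deleteBatchWithChecksum deletes two placeholder keys and sets the new
// ChecksumAlgorithm field so the checksum input middleware computes and sends
// a SHA-256 checksum of the request payload.
func deleteBatchWithChecksum(ctx context.Context, client *s3.Client) error {
	_, err := client.DeleteObjects(ctx, &s3.DeleteObjectsInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket name
		Delete: &types.Delete{
			Objects: []types.ObjectIdentifier{
				{Key: aws.String("reports/2022-09.csv")}, // hypothetical keys
				{Key: aws.String("reports/2022-10.csv")},
			},
		},
		ChecksumAlgorithm: types.ChecksumAlgorithmSha256,
	})
	return err
}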
// getDeleteObjectsBucketMember returns a pointer to string denoting a provided
// bucket member value and a boolean indicating if the input has a modeled bucket
// name.

View file

@ -58,7 +58,8 @@ type DeletePublicAccessBlockInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -59,7 +59,8 @@ type GetBucketAccelerateConfigurationInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -16,7 +16,13 @@ import (
// access control list (ACL) of a bucket. To use GET to return the ACL of the
// bucket, you must have READ_ACP access to the bucket. If READ_ACP permission is
// granted to the anonymous user, you can return the ACL of the bucket without
// using an authorization header. Related Resources
// using an authorization header. If your bucket uses the bucket owner enforced
// setting for S3 Object Ownership, requests to read ACLs are still supported and
// return the bucket-owner-full-control ACL with the owner being the account that
// created the bucket. For more information, see Controlling object ownership and
// disabling ACLs
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
// in the Amazon S3 User Guide. Related Resources
//
// * ListObjects
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html)
@ -43,7 +49,8 @@ type GetBucketAclInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -65,7 +65,8 @@ type GetBucketAnalyticsConfigurationInput struct {
Id *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -12,10 +12,11 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Returns the cors configuration information set for the bucket. To use this
// operation, you must have permission to perform the s3:GetBucketCORS action. By
// default, the bucket owner has this permission and can grant it to others. For
// more information about cors, see Enabling Cross-Origin Resource Sharing
// Returns the Cross-Origin Resource Sharing (CORS) configuration information set
// for the bucket. To use this operation, you must have permission to perform the
// s3:GetBucketCORS action. By default, the bucket owner has this permission and
// can grant it to others. For more information about CORS, see Enabling
// Cross-Origin Resource Sharing
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html). The following
// operations are related to GetBucketCors:
//
@ -48,7 +49,8 @@ type GetBucketCorsInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -59,7 +59,8 @@ type GetBucketEncryptionInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -16,16 +16,16 @@ import (
// Intelligent-Tiering storage class is designed to optimize storage costs by
// automatically moving data to the most cost-effective storage access tier,
// without performance impact or operational overhead. S3 Intelligent-Tiering
// delivers automatic cost savings in two low latency and high throughput access
// tiers. For data that can be accessed asynchronously, you can choose to activate
// automatic archiving capabilities within the S3 Intelligent-Tiering storage
// class. The S3 Intelligent-Tiering storage class is the ideal storage class for
// data with unknown, changing, or unpredictable access patterns, independent of
// object size or retention period. If the size of an object is less than 128 KB,
// it is not eligible for auto-tiering. Smaller objects can be stored, but they are
// always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering
// storage class. For more information, see Storage class for automatically
// optimizing frequently and infrequently accessed objects
// delivers automatic cost savings in three low latency and high throughput access
// tiers. To get the lowest storage cost on data that can be accessed in minutes to
// hours, you can choose to activate additional archiving capabilities. The S3
// Intelligent-Tiering storage class is the ideal storage class for data with
// unknown, changing, or unpredictable access patterns, independent of object size
// or retention period. If the size of an object is less than 128 KB, it is not
// monitored and not eligible for auto-tiering. Smaller objects can be stored, but
// they are always charged at the Frequent Access tier rates in the S3
// Intelligent-Tiering storage class. For more information, see Storage class for
// automatically optimizing frequently and infrequently accessed objects
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access).
// Operations related to GetBucketIntelligentTieringConfiguration include:
//

View file

@ -63,7 +63,8 @@ type GetBucketInventoryConfigurationInput struct {
Id *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -80,7 +80,8 @@ type GetBucketLifecycleConfigurationInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -57,7 +57,8 @@ type GetBucketLocationInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -45,7 +45,8 @@ type GetBucketLoggingInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -70,7 +70,8 @@ type GetBucketMetricsConfigurationInput struct {
Id *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -50,7 +50,8 @@ type GetBucketNotificationConfigurationInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -60,6 +61,9 @@ type GetBucketNotificationConfigurationInput struct {
// element is empty, notifications are turned off for the bucket.
type GetBucketNotificationConfigurationOutput struct {
// Enables delivery of events to Amazon EventBridge.
EventBridgeConfiguration *types.EventBridgeConfiguration
// Describes the Lambda functions to invoke and the events for which to invoke
// them.
LambdaFunctionConfigurations []types.LambdaFunctionConfiguration

View file

@ -14,10 +14,10 @@ import (
// Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you
// must have the s3:GetBucketOwnershipControls permission. For more information
// about Amazon S3 permissions, see Specifying Permissions in a Policy
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html).
// about Amazon S3 permissions, see Specifying permissions in a policy
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html).
// For information about Amazon S3 Object Ownership, see Using Object Ownership
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html).
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html).
// The following operations are related to GetBucketOwnershipControls:
//
// *
@ -47,7 +47,8 @@ type GetBucketOwnershipControlsInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
@ -55,8 +56,8 @@ type GetBucketOwnershipControlsInput struct {
type GetBucketOwnershipControlsOutput struct {
// The OwnershipControls (BucketOwnerPreferred or ObjectWriter) currently in effect
// for this Amazon S3 bucket.
// The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or
// ObjectWriter) currently in effect for this Amazon S3 bucket.
OwnershipControls *types.OwnershipControls
// Metadata pertaining to the operation's result.

View file

@ -51,7 +51,8 @@ type GetBucketPolicyInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -60,7 +60,8 @@ type GetBucketPolicyStatusInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -58,7 +58,8 @@ type GetBucketReplicationInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -43,7 +43,8 @@ type GetBucketRequestPaymentInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -17,16 +17,15 @@ import (
// bucket owner has this permission and can grant this permission to others.
// GetBucketTagging has the following special error:
//
// * Error code:
// NoSuchTagSetError
//
// * Description: There is no tag set associated with the
// bucket.
//
// The following operations are related to GetBucketTagging:
// * Error code: NoSuchTagSet
//
// *
// PutBucketTagging
// Description: There is no tag set associated with the bucket.
//
// The following
// operations are related to GetBucketTagging:
//
// * PutBucketTagging
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html)
//
// *
@ -55,7 +54,8 @@ type GetBucketTaggingInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -52,7 +52,8 @@ type GetBucketVersioningInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -51,7 +51,8 @@ type GetBucketWebsiteInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde

View file

@ -6,6 +6,7 @@ import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
internalChecksum "github.com/aws/aws-sdk-go-v2/service/internal/checksum"
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go/middleware"
@ -29,10 +30,7 @@ import (
// specify the resource as /examplebucket/photos/2006/February/sample.jpg. For more
// information about request types, see HTTP Host Header Bucket Specification
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket).
// To distribute large files to many people, you can save bandwidth costs by using
// BitTorrent. For more information, see Amazon S3 Torrent
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html). For more
// information about returning the ACL of an object, see GetObjectAcl
// For more information about returning the ACL of an object, see GetObjectAcl
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html). If the
// object you are retrieving is stored in the S3 Glacier or S3 Glacier Deep Archive
// storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep
@ -84,15 +82,17 @@ import (
// Versioning By default, the GET action returns the current version of an
// object. To return a different version, use the versionId subresource.
//
// * You
// need the s3:GetObjectVersion permission to access a specific version of an
// object.
// * If you
// supply a versionId, you need the s3:GetObjectVersion permission to access a
// specific version of an object. If you request a specific version, you do not
// need to have the s3:GetObject permission.
//
// * If the current version of the object is a delete marker, Amazon S3
// behaves as if the object was deleted and includes x-amz-delete-marker: true in
// the response.
// * If the current version of the
// object is a delete marker, Amazon S3 behaves as if the object was deleted and
// includes x-amz-delete-marker: true in the response.
//
// For more information about versioning, see PutBucketVersioning
// For more information about
// versioning, see PutBucketVersioning
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html).
// Overriding Response Header Values There are times when you want to override
// certain response header values in a GET response. For example, you might
@ -171,9 +171,9 @@ type GetObjectInput struct {
// action with Amazon S3 on Outposts, you must direct requests to the S3 on
// Outposts hostname. The S3 on Outposts hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
// this action using S3 on Outposts through the Amazon Web Services SDKs, you
// this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts bucket ARN in place of the bucket name. For more
// information about S3 on Outposts ARNs, see Using S3 on Outposts
// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the
// Amazon S3 User Guide.
//
@ -185,24 +185,28 @@ type GetObjectInput struct {
// This member is required.
Key *string
// To retrieve the checksum, this mode must be enabled.
ChecksumMode types.ChecksumMode
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
// Return the object only if its entity tag (ETag) is the same as the one
// specified, otherwise return a 412 (precondition failed).
// specified; otherwise, return a 412 (precondition failed) error.
IfMatch *string
// Return the object only if it has been modified since the specified time,
// otherwise return a 304 (not modified).
// Return the object only if it has been modified since the specified time;
// otherwise, return a 304 (not modified) error.
IfModifiedSince *time.Time
// Return the object only if its entity tag (ETag) is different from the one
// specified, otherwise return a 304 (not modified).
// specified; otherwise, return a 304 (not modified) error.
IfNoneMatch *string
// Return the object only if it has not been modified since the specified time,
// otherwise return a 412 (precondition failed).
// Return the object only if it has not been modified since the specified time;
// otherwise, return a 412 (precondition failed) error.
IfUnmodifiedSince *time.Time
// Part number of the object being read. This is a positive integer between 1 and
@ -219,8 +223,8 @@ type GetObjectInput struct {
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from requester pays buckets, see Downloading Objects
// in Requestor Pays Buckets
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer
@ -280,6 +284,38 @@ type GetObjectOutput struct {
// Specifies caching behavior along the request/reply chain.
CacheControl *string
// The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
// present if it was uploaded with the object. With multipart uploads, this may not
// be a checksum value of the object. For more information about how checksums are
// calculated with multipart uploads, see Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32 *string
// The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
// present if it was uploaded with the object. With multipart uploads, this may not
// be a checksum value of the object. For more information about how checksums are
// calculated with multipart uploads, see Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumCRC32C *string
// The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
// present if it was uploaded with the object. With multipart uploads, this may not
// be a checksum value of the object. For more information about how checksums are
// calculated with multipart uploads, see Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA1 *string
// The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
// present if it was uploaded with the object. With multipart uploads, this may not
// be a checksum value of the object. For more information about how checksums are
// calculated with multipart uploads, see Checking object integrity
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums)
// in the Amazon S3 User Guide.
ChecksumSHA256 *string
// Specifies presentational information for the object.
ContentDisposition *string
@ -304,14 +340,14 @@ type GetObjectOutput struct {
// Marker. If false, this response header does not appear in the response.
DeleteMarker bool
// An ETag is an opaque identifier assigned by a web server to a specific version
// of a resource found at a URL.
// An entity tag (ETag) is an opaque identifier assigned by a web server to a
// specific version of a resource found at a URL.
ETag *string
// If the object expiration is configured (see PUT Bucket lifecycle), the response
// includes this header. It includes the expiry-date and rule-id key-value pairs
// providing object expiration information. The value of the rule-id is URL
// encoded.
// providing object expiration information. The value of the rule-id is
// URL-encoded.
Expiration *string
// The date and time at which the object is no longer cacheable.
@ -341,7 +377,8 @@ type GetObjectOutput struct {
// The date and time when this object's Object Lock will expire.
ObjectLockRetainUntilDate *time.Time
// The count of parts this object has.
// The count of parts this object has. This value is only returned if you specify
// partNumber in your request and the object was uploaded as a multipart upload.
PartsCount int32
// Amazon S3 can return this if your request involves a bucket that is either a
@ -449,6 +486,9 @@ func (c *Client) addOperationGetObjectMiddlewares(stack *middleware.Stack, optio
if err = addMetadataRetrieverMiddleware(stack); err != nil {
return err
}
if err = addGetObjectOutputChecksumMiddlewares(stack, options); err != nil {
return err
}
if err = addGetObjectUpdateEndpoint(stack, options); err != nil {
return err
}
@ -476,6 +516,26 @@ func newServiceMetadataMiddleware_opGetObject(region string) *awsmiddleware.Regi
}
}
// getGetObjectRequestValidationModeMember gets the request checksum validation
// mode provided as input.
func getGetObjectRequestValidationModeMember(input interface{}) (string, bool) {
in := input.(*GetObjectInput)
if len(in.ChecksumMode) == 0 {
return "", false
}
return string(in.ChecksumMode), true
}
func addGetObjectOutputChecksumMiddlewares(stack *middleware.Stack, options Options) error {
return internalChecksum.AddOutputMiddleware(stack, internalChecksum.OutputMiddlewareOptions{
GetValidationMode: getGetObjectRequestValidationModeMember,
ValidationAlgorithms: []string{"CRC32", "CRC32C", "SHA256", "SHA1"},
IgnoreMultipartValidation: true,
LogValidationSkipped: true,
LogMultipartValidationSkipped: true,
})
}
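
A sketch of the consumer side of the new output checksum middleware: enabling ChecksumMode on GetObject asks the SDK to validate the downloaded body against whichever supported checksum (CRC32, CRC32C, SHA-1, SHA-256) the object was stored with. The *s3.Client is assumed to be configured elsewhere, and the bucket and key are hypothetical placeholders:

package s3usage

import (
	"context"
	"io"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// getWithChecksumValidation downloads an object with response checksum
// validation enabled via the new ChecksumMode field.
func getWithChecksumValidation(ctx context.Context, client *s3.Client) ([]byte, error) {
	out, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket:       aws.String("example-bucket"), // hypothetical bucket name
		Key:          aws.String("example-key"),    // hypothetical object key
		ChecksumMode: types.ChecksumModeEnabled,
	})
	if err != nil {
		return nil, err
	}
	defer out.Body.Close()
	// Validation happens as the wrapped body is read to completion.
	return io.ReadAll(out.Body)
}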
// getGetObjectBucketMember returns a pointer to string denoting a provided bucket
// member value and a boolean indicating if the input has a modeled bucket name.
func getGetObjectBucketMember(input interface{}) (*string, bool) {

View file

@ -13,16 +13,29 @@ import (
)
// Returns the access control list (ACL) of an object. To use this operation, you
// must have READ_ACP access to the object. This action is not supported by Amazon
// S3 on Outposts. Versioning By default, GET returns ACL information about the
// current version of an object. To return ACL information about a different
// version, use the versionId subresource. The following operations are related to
// must have s3:GetObjectAcl permissions or READ_ACP access to the object. For more
// information, see Mapping of ACL permissions and access policy permissions
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping)
// in the Amazon S3 User Guide. This action is not supported by Amazon S3 on
// Outposts. Versioning By default, GET returns ACL information about the current
// version of an object. To return ACL information about a different version, use
// the versionId subresource. If your bucket uses the bucket owner enforced setting
// for S3 Object Ownership, requests to read ACLs are still supported and return
// the bucket-owner-full-control ACL with the owner being the account that created
// the bucket. For more information, see Controlling object ownership and
// disabling ACLs
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html)
// in the Amazon S3 User Guide. The following operations are related to
// GetObjectAcl:
//
// * GetObject
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
//
// *
// GetObjectAttributes
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html)
//
// *
// DeleteObject
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html)
//
@ -64,13 +77,14 @@ type GetObjectAclInput struct {
Key *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from requester pays buckets, see Downloading Objects
// in Requestor Pays Buckets
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer

View file

@ -0,0 +1,363 @@
// Code generated by smithy-go-codegen DO NOT EDIT.
package s3
import (
"context"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
"time"
)
// Retrieves all the metadata from an object without returning the object itself.
// This action is useful if you're interested only in an object's metadata. To use
// GetObjectAttributes, you must have READ access to the object.
// GetObjectAttributes combines the functionality of GetObjectAcl,
// GetObjectLegalHold, GetObjectLockConfiguration, GetObjectRetention,
// GetObjectTagging, HeadObject, and ListParts. All of the data returned with each
// of those individual calls can be returned with a single call to
// GetObjectAttributes. If you encrypt an object by using server-side encryption
// with customer-provided encryption keys (SSE-C) when you store the object in
// Amazon S3, then when you retrieve the metadata from the object, you must use the
// following headers:
//
// * x-amz-server-side-encryption-customer-algorithm
//
// *
// x-amz-server-side-encryption-customer-key
//
// *
// x-amz-server-side-encryption-customer-key-MD5
//
// For more information about SSE-C,
// see Server-Side Encryption (Using Customer-Provided Encryption Keys)
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html)
// in the Amazon S3 User Guide.
//
// * Encryption request headers, such as
// x-amz-server-side-encryption, should not be sent for GET requests if your object
// uses server-side encryption with Amazon Web Services KMS keys stored in Amazon
// Web Services Key Management Service (SSE-KMS) or server-side encryption with
// Amazon S3 managed encryption keys (SSE-S3). If your object does use these types
// of keys, you'll get an HTTP 400 Bad Request error.
//
// * The last modified property
// in this case is the creation date of the object.
//
// Consider the following when
// using request headers:
//
// * If both of the If-Match and If-Unmodified-Since
// headers are present in the request as follows, then Amazon S3 returns the HTTP
// status code 200 OK and the data requested:
//
// * If-Match condition evaluates to
// true.
//
// * If-Unmodified-Since condition evaluates to false.
//
// * If both of the
// If-None-Match and If-Modified-Since headers are present in the request as
// follows, then Amazon S3 returns the HTTP status code 304 Not Modified:
//
// *
// If-None-Match condition evaluates to false.
//
// * If-Modified-Since condition
// evaluates to true.
//
// For more information about conditional requests, see RFC
// 7232 (https://tools.ietf.org/html/rfc7232). Permissions The permissions that you
// need to use this operation depend on whether the bucket is versioned. If the
// bucket is versioned, you need both the s3:GetObjectVersion and
// s3:GetObjectVersionAttributes permissions for this operation. If the bucket is
// not versioned, you need the s3:GetObject and s3:GetObjectAttributes permissions.
// For more information, see Specifying Permissions in a Policy
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html) in
// the Amazon S3 User Guide. If the object that you request does not exist, the
// error Amazon S3 returns depends on whether you also have the s3:ListBucket
// permission.
//
// * If you have the s3:ListBucket permission on the bucket, Amazon S3
// returns an HTTP status code 404 Not Found ("no such key") error.
//
// * If you don't
// have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403
// Forbidden ("access denied") error.
//
// The following actions are related to
// GetObjectAttributes:
//
// * GetObject
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html)
//
// *
// GetObjectAcl
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html)
//
// *
// GetObjectLegalHold
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html)
//
// *
// GetObjectLockConfiguration
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html)
//
// *
// GetObjectRetention
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html)
//
// *
// GetObjectTagging
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html)
//
// *
// HeadObject
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html)
//
// *
// ListParts (https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html)
func (c *Client) GetObjectAttributes(ctx context.Context, params *GetObjectAttributesInput, optFns ...func(*Options)) (*GetObjectAttributesOutput, error) {
if params == nil {
params = &GetObjectAttributesInput{}
}
result, metadata, err := c.invokeOperation(ctx, "GetObjectAttributes", params, optFns, c.addOperationGetObjectAttributesMiddlewares)
if err != nil {
return nil, err
}
out := result.(*GetObjectAttributesOutput)
out.ResultMetadata = metadata
return out, nil
}
type GetObjectAttributesInput struct {
// The name of the bucket that contains the object. When using this action with an
// access point, you must direct requests to the access point hostname. The access
// point hostname takes the form
// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
// action with an access point through the Amazon Web Services SDKs, you provide
// the access point ARN in place of the bucket name. For more information about
// access point ARNs, see Using access points
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html)
// in the Amazon S3 User Guide. When using this action with Amazon S3 on Outposts,
// you must direct requests to the S3 on Outposts hostname. The S3 on Outposts
// hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
// this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts bucket ARN in place of the bucket name. For more
// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the
// Amazon S3 User Guide.
//
// This member is required.
Bucket *string
// The object key.
//
// This member is required.
Key *string
// An XML header that specifies the fields at the root level that you want returned
// in the response. Fields that you do not specify are not returned.
//
// This member is required.
ObjectAttributes []types.ObjectAttributes
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
// Sets the maximum number of parts to return.
MaxParts int32
// Specifies the part after which listing should begin. Only parts with higher part
// numbers will be listed.
PartNumberMarker *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer
// Specifies the algorithm to use when encrypting the object (for example, AES256).
SSECustomerAlgorithm *string
// Specifies the customer-provided encryption key for Amazon S3 to use in
// encrypting data. This value is used to store the object and then it is
// discarded; Amazon S3 does not store the encryption key. The key must be
// appropriate for use with the algorithm specified in the
// x-amz-server-side-encryption-customer-algorithm header.
SSECustomerKey *string
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
// encryption key was transmitted without error.
SSECustomerKeyMD5 *string
// The version ID used to reference a specific version of the object.
VersionId *string
noSmithyDocumentSerde
}
type GetObjectAttributesOutput struct {
// The checksum or digest of the object.
Checksum *types.Checksum
// Specifies whether the object retrieved was (true) or was not (false) a delete
// marker. If false, this response header does not appear in the response.
DeleteMarker bool
// An ETag is an opaque identifier assigned by a web server to a specific version
// of a resource found at a URL.
ETag *string
// The creation date of the object.
LastModified *time.Time
// A collection of parts associated with a multipart upload.
ObjectParts *types.GetObjectAttributesParts
// The size of the object in bytes.
ObjectSize int64
// If present, indicates that the requester was successfully charged for the
// request.
RequestCharged types.RequestCharged
// Provides the storage class information of the object. Amazon S3 returns this
// header for all objects except for S3 Standard storage class objects. For more
// information, see Storage Classes
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html).
StorageClass types.StorageClass
// The version ID of the object.
VersionId *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationGetObjectAttributesMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsRestxml_serializeOpGetObjectAttributes{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestxml_deserializeOpGetObjectAttributes{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = swapWithCustomHTTPSignerMiddleware(stack, options); err != nil {
return err
}
if err = addOpGetObjectAttributesValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetObjectAttributes(options.Region), middleware.Before); err != nil {
return err
}
if err = addMetadataRetrieverMiddleware(stack); err != nil {
return err
}
if err = addGetObjectAttributesUpdateEndpoint(stack, options); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
return err
}
if err = disableAcceptEncodingGzip(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
func newServiceMetadataMiddleware_opGetObjectAttributes(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "s3",
OperationName: "GetObjectAttributes",
}
}
// getGetObjectAttributesBucketMember returns a pointer to string denoting a
// provided bucket member value and a boolean indicating if the input has a
// modeled bucket name.
func getGetObjectAttributesBucketMember(input interface{}) (*string, bool) {
in := input.(*GetObjectAttributesInput)
if in.Bucket == nil {
return nil, false
}
return in.Bucket, true
}
func addGetObjectAttributesUpdateEndpoint(stack *middleware.Stack, options Options) error {
return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
Accessor: s3cust.UpdateEndpointParameterAccessor{
GetBucketFromInput: getGetObjectAttributesBucketMember,
},
UsePathStyle: options.UsePathStyle,
UseAccelerate: options.UseAccelerate,
SupportsAccelerate: true,
TargetS3ObjectLambda: false,
EndpointResolver: options.EndpointResolver,
EndpointResolverOptions: options.EndpointOptions,
UseARNRegion: options.UseARNRegion,
DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
})
}
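For context, a hedged sketch of the client-side Options that this endpoint customization reads (UsePathStyle, UseAccelerate); the helper name and the chosen values are illustrative assumptions, not part of the vendored package.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// newClient shows the Options fields consumed by the endpoint-update helper
// above; the settings here are illustrative only.
func newClient(ctx context.Context) (*s3.Client, error) {
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		return nil, err
	}
	return s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.UsePathStyle = true   // keep the bucket in the request path instead of the host name
		o.UseAccelerate = false // Transfer Acceleration stays off
	}), nil
}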

View file

@ -12,9 +12,13 @@ import (
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Gets an object's current Legal Hold status. For more information, see Locking
// Gets an object's current legal hold status. For more information, see Locking
// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). This
// action is not supported by Amazon S3 on Outposts.
// action is not supported by Amazon S3 on Outposts. The following action is
// related to GetObjectLegalHold:
//
// * GetObjectAttributes
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html)
func (c *Client) GetObjectLegalHold(ctx context.Context, params *GetObjectLegalHoldInput, optFns ...func(*Options)) (*GetObjectLegalHoldOutput, error) {
if params == nil {
params = &GetObjectLegalHoldInput{}
@ -32,7 +36,7 @@ func (c *Client) GetObjectLegalHold(ctx context.Context, params *GetObjectLegalH
type GetObjectLegalHoldInput struct {
// The bucket name containing the object whose Legal Hold status you want to
// The bucket name containing the object whose legal hold status you want to
// retrieve. When using this action with an access point, you must direct requests
// to the access point hostname. The access point hostname takes the form
// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
@ -45,24 +49,25 @@ type GetObjectLegalHoldInput struct {
// This member is required.
Bucket *string
// The key name for the object whose Legal Hold status you want to retrieve.
// The key name for the object whose legal hold status you want to retrieve.
//
// This member is required.
Key *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from requester pays buckets, see Downloading Objects
// in Requestor Pays Buckets
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer
// The version ID of the object whose Legal Hold status you want to retrieve.
// The version ID of the object whose legal hold status you want to retrieve.
VersionId *string
noSmithyDocumentSerde
@ -70,7 +75,7 @@ type GetObjectLegalHoldInput struct {
type GetObjectLegalHoldOutput struct {
// The current Legal Hold status for the specified object.
// The current legal hold status for the specified object.
LegalHold *types.ObjectLockLegalHold
// Metadata pertaining to the operation's result.
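A short, assumed-usage sketch of GetObjectLegalHold based on the fields shown in this hunk; the bucket, key, and helper name are hypothetical.

package example

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// legalHoldOn reports whether the (placeholder) object currently carries a legal hold.
func legalHoldOn(ctx context.Context, client *s3.Client) (bool, error) {
	out, err := client.GetObjectLegalHold(ctx, &s3.GetObjectLegalHoldInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	})
	if err != nil {
		return false, err
	}
	return out.LegalHold != nil && out.LegalHold.Status == types.ObjectLockLegalHoldStatusOn, nil
}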

View file

@ -15,7 +15,12 @@ import (
// Gets the Object Lock configuration for a bucket. The rule specified in the
// Object Lock configuration will be applied by default to every new object placed
// in the specified bucket. For more information, see Locking Objects
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html).
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). The
// following action is related to GetObjectLockConfiguration:
//
// *
// GetObjectAttributes
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html)
func (c *Client) GetObjectLockConfiguration(ctx context.Context, params *GetObjectLockConfigurationInput, optFns ...func(*Options)) (*GetObjectLockConfigurationOutput, error) {
if params == nil {
params = &GetObjectLockConfigurationInput{}
@ -47,7 +52,8 @@ type GetObjectLockConfigurationInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
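For illustration, a hedged sketch of GetObjectLockConfiguration with the ExpectedBucketOwner check described above; the bucket name and account ID are placeholders.

package example

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printLockConfig fetches the bucket-level Object Lock configuration.
func printLockConfig(ctx context.Context, client *s3.Client) error {
	out, err := client.GetObjectLockConfiguration(ctx, &s3.GetObjectLockConfigurationInput{
		Bucket:              aws.String("example-bucket"),
		ExpectedBucketOwner: aws.String("111122223333"),
	})
	if err != nil {
		return err // 403 Forbidden if the bucket is owned by another account
	}
	if lock := out.ObjectLockConfiguration; lock != nil {
		log.Printf("object lock: %s, default rule: %+v", lock.ObjectLockEnabled, lock.Rule)
	}
	return nil
}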

View file

@ -14,7 +14,11 @@ import (
// Retrieves an object's retention settings. For more information, see Locking
// Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html). This
// action is not supported by Amazon S3 on Outposts.
// action is not supported by Amazon S3 on Outposts. The following action is
// related to GetObjectRetention:
//
// * GetObjectAttributes
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html)
func (c *Client) GetObjectRetention(ctx context.Context, params *GetObjectRetentionInput, optFns ...func(*Options)) (*GetObjectRetentionOutput, error) {
if params == nil {
params = &GetObjectRetentionInput{}
@ -51,13 +55,14 @@ type GetObjectRetentionInput struct {
Key *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from requester pays buckets, see Downloading Objects
// in Requestor Pays Buckets
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer
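A brief sketch of reading an object's retention settings with GetObjectRetention; all identifiers are placeholders and the helper is hypothetical.

package example

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printRetention reads the retention settings of a specific object version.
func printRetention(ctx context.Context, client *s3.Client) error {
	out, err := client.GetObjectRetention(ctx, &s3.GetObjectRetentionInput{
		Bucket:    aws.String("example-bucket"),
		Key:       aws.String("example-key"),
		VersionId: aws.String("example-version-id"),
	})
	if err != nil {
		return err
	}
	if r := out.Retention; r != nil {
		log.Printf("mode=%s retain-until=%v", r.Mode, r.RetainUntilDate)
	}
	return nil
}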

View file

@ -22,14 +22,18 @@ import (
// permission and can grant this permission to others. For information about the
// Amazon S3 object tagging feature, see Object Tagging
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html). The
// following action is related to GetObjectTagging:
// following actions are related to GetObjectTagging:
//
// * PutObjectTagging
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html)
// * DeleteObjectTagging
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html)
//
// *
// DeleteObjectTagging
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html)
//
// *
// GetObjectAttributes
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html)
//
// *
// PutObjectTagging
// (https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html)
func (c *Client) GetObjectTagging(ctx context.Context, params *GetObjectTaggingInput, optFns ...func(*Options)) (*GetObjectTaggingOutput, error) {
if params == nil {
params = &GetObjectTaggingInput{}
@ -59,9 +63,9 @@ type GetObjectTaggingInput struct {
// you must direct requests to the S3 on Outposts hostname. The S3 on Outposts
// hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using
// this action using S3 on Outposts through the Amazon Web Services SDKs, you
// this action with S3 on Outposts through the Amazon Web Services SDKs, you
// provide the Outposts bucket ARN in place of the bucket name. For more
// information about S3 on Outposts ARNs, see Using S3 on Outposts
// information about S3 on Outposts ARNs, see Using Amazon S3 on Outposts
// (https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html) in the
// Amazon S3 User Guide.
//
@ -74,13 +78,14 @@ type GetObjectTaggingInput struct {
Key *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from requester pays buckets, see Downloading Objects
// in Requestor Pays Buckets
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer
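As an illustration of the tagging API touched by this hunk, a minimal sketch that lists an object's tag set; bucket and key are placeholders.

package example

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printTags lists the tag set attached to a (placeholder) object.
func printTags(ctx context.Context, client *s3.Client) error {
	out, err := client.GetObjectTagging(ctx, &s3.GetObjectTaggingInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("example-key"),
	})
	if err != nil {
		return err
	}
	for _, tag := range out.TagSet {
		log.Printf("%s=%s", aws.ToString(tag.Key), aws.ToString(tag.Value))
	}
	return nil
}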

View file

@ -53,13 +53,14 @@ type GetObjectTorrentInput struct {
Key *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. For information
// about downloading objects from requester pays buckets, see Downloading Objects
// in Requestor Pays Buckets
// about downloading objects from Requester Pays buckets, see Downloading Objects
// in Requester Pays Buckets
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html)
// in the Amazon S3 User Guide.
RequestPayer types.RequestPayer
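A hedged sketch of GetObjectTorrent against a Requester Pays bucket, acknowledging the charge via RequestPayer as the documentation above describes; the names and the helper are hypothetical.

package example

import (
	"context"
	"io"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// saveTorrent streams the .torrent file for a (placeholder) object to w.
func saveTorrent(ctx context.Context, client *s3.Client, w io.Writer) error {
	out, err := client.GetObjectTorrent(ctx, &s3.GetObjectTorrentInput{
		Bucket:       aws.String("example-bucket"),
		Key:          aws.String("example-key"),
		RequestPayer: types.RequestPayerRequester, // accept the Requester Pays charge
	})
	if err != nil {
		return err
	}
	defer out.Body.Close()
	_, err = io.Copy(w, out.Body)
	return err
}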

View file

@ -66,7 +66,8 @@ type GetPublicAccessBlockInput struct {
Bucket *string
// The account ID of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied) error.
// different account, the request fails with the HTTP status code 403 Forbidden
// (access denied).
ExpectedBucketOwner *string
noSmithyDocumentSerde
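Finally, a small sketch of GetPublicAccessBlock with ExpectedBucketOwner set, where an owner mismatch surfaces as the 403 Forbidden described above; the bucket name and account ID are placeholders.

package example

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

// printPublicAccessBlock reads a bucket's public-access block settings.
func printPublicAccessBlock(ctx context.Context, client *s3.Client) error {
	out, err := client.GetPublicAccessBlock(ctx, &s3.GetPublicAccessBlockInput{
		Bucket:              aws.String("example-bucket"),
		ExpectedBucketOwner: aws.String("111122223333"),
	})
	if err != nil {
		return err
	}
	log.Printf("public access block: %+v", out.PublicAccessBlockConfiguration)
	return nil
}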

Some files were not shown because too many files have changed in this diff.