vendor: make vendor-update

Aliaksandr Valialkin 2022-08-15 00:53:41 +03:00
parent d335694add
commit 308f29f674
GPG key ID: A72BEC6CD3D0DED1 (no known key found for this signature in database)
110 changed files with 10874 additions and 6632 deletions

Makefile

@ -363,7 +363,7 @@ benchmark-pure:
vendor-update:
go get -u -d ./lib/...
go get -u -d ./app/...
go mod tidy -compat=1.17
go mod tidy -compat=1.18
go mod vendor
app-local:

go.mod

@ -3,7 +3,7 @@ module github.com/VictoriaMetrics/VictoriaMetrics
go 1.17
require (
cloud.google.com/go/storage v1.24.0
cloud.google.com/go/storage v1.25.0
github.com/VictoriaMetrics/fastcache v1.10.0
// Do not use the original github.com/valyala/fasthttp because of issues
@ -11,7 +11,7 @@ require (
github.com/VictoriaMetrics/fasthttp v1.1.0
github.com/VictoriaMetrics/metrics v1.22.1
github.com/VictoriaMetrics/metricsql v0.44.1
github.com/aws/aws-sdk-go v1.44.70
github.com/aws/aws-sdk-go v1.44.76
github.com/cespare/xxhash/v2 v2.1.2
// TODO: switch back to https://github.com/cheggaaa/pb/v3 when v3-pooling branch
@ -22,22 +22,22 @@ require (
github.com/influxdata/influxdb v1.10.0
github.com/klauspost/compress v1.15.9
github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
github.com/urfave/cli/v2 v2.11.1
github.com/urfave/cli/v2 v2.11.2
github.com/valyala/fastjson v1.6.3
github.com/valyala/fastrand v1.1.0
github.com/valyala/fasttemplate v1.2.1
github.com/valyala/gozstd v1.17.0
github.com/valyala/quicktemplate v1.7.0
golang.org/x/net v0.0.0-20220805013720-a33c5aa5df48
golang.org/x/oauth2 v0.0.0-20220722155238-128564f6959c
golang.org/x/sys v0.0.0-20220804214406-8e32c043e418
google.golang.org/api v0.91.0
golang.org/x/net v0.0.0-20220812174116-3211cb980234
golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab
google.golang.org/api v0.92.0
gopkg.in/yaml.v2 v2.4.0
)
require (
cloud.google.com/go v0.103.0 // indirect
cloud.google.com/go/compute v1.7.0 // indirect
cloud.google.com/go/compute v1.8.0 // indirect
cloud.google.com/go/iam v0.3.0 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
@ -69,13 +69,13 @@ require (
github.com/valyala/histogram v1.2.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
go.opencensus.io v0.23.0 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 // indirect
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220805133916-01dd62135a58 // indirect
google.golang.org/genproto v0.0.0-20220812140447-cec7f5303424 // indirect
google.golang.org/grpc v1.48.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
)

go.sum

@ -45,8 +45,9 @@ cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJW
cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk=
cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
cloud.google.com/go/compute v1.8.0 h1:NLtR56/eKx9K1s2Tw/4hec2vsU1S3WeKRMj8HXbBo6E=
cloud.google.com/go/compute v1.8.0/go.mod h1:boQ44qJsMqZjKzzsEkoJWQGj4h8ygmyk17UArClWzmg=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc=
@ -62,8 +63,8 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
cloud.google.com/go/storage v1.24.0 h1:a4N0gIkx83uoVFGz8B2eAV3OhN90QoWF5OZWLKl39ig=
cloud.google.com/go/storage v1.24.0/go.mod h1:3xrJEFMXBsQLgxwThyjuD3aYlroL0TMRec1ypGUQ0KE=
cloud.google.com/go/storage v1.25.0 h1:D2Dn0PslpK7Z3B2AvuUHyIC762bDbGJdlmQlCBR71os=
cloud.google.com/go/storage v1.25.0/go.mod h1:Qys4JU+jeup3QnuKKAosWuxrD95C4MSqxfVDnSirDsI=
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v48.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
@ -148,8 +149,8 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.44.70 h1:wrwAbqJqf+ncEK1F/bXTYpgO6zXIgQXi/2ppBgmYI9g=
github.com/aws/aws-sdk-go v1.44.70/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go v1.44.76 h1:5e8yGO/XeNYKckOjpBKUd5wStf0So3CrQIiOMCVLpOI=
github.com/aws/aws-sdk-go v1.44.76/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@ -826,8 +827,8 @@ github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMW
github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli/v2 v2.11.1 h1:UKK6SP7fV3eKOefbS87iT9YHefv7iB/53ih6e+GNAsE=
github.com/urfave/cli/v2 v2.11.1/go.mod h1:f8iq5LtQ/bLxafbdBSLPPNsgaW0l/2fYYEHhAyPlwvo=
github.com/urfave/cli/v2 v2.11.2 h1:FVfNg4m3vbjbBpLYxW//WjxUoHvJ9TlppXcqY9Q9ZfA=
github.com/urfave/cli/v2 v2.11.2/go.mod h1:f8iq5LtQ/bLxafbdBSLPPNsgaW0l/2fYYEHhAyPlwvo=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
@ -880,8 +881,8 @@ go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqe
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
@ -1006,8 +1007,8 @@ golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220805013720-a33c5aa5df48 h1:N9Vc/rorQUDes6B9CNdIxAn5jODGj2wzfrei2x4wNj4=
golang.org/x/net v0.0.0-20220805013720-a33c5aa5df48/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.0.0-20220812174116-3211cb980234 h1:RDqmgfe7SvlMWoqC3xwQ2blLO3fcWcxMa3eBLRdRW7E=
golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1029,8 +1030,8 @@ golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
golang.org/x/oauth2 v0.0.0-20220722155238-128564f6959c h1:q3gFqPqH7NVofKo3c3yETAP//pPI+G5mvB7qqj1Y5kY=
golang.org/x/oauth2 v0.0.0-20220722155238-128564f6959c/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7 h1:dtndE8FcEta75/4kHF3AbpuWzV6f1LjnLrM4pe2SZrw=
golang.org/x/oauth2 v0.0.0-20220808172628-8227340efae7/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -1142,8 +1143,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220804214406-8e32c043e418 h1:9vYwv7OjYaky/tlAeD7C4oC9EsPTlaFl1H2jS++V+ME=
golang.org/x/sys v0.0.0-20220804214406-8e32c043e418/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab h1:2QkjZIsXupsJbJIdSjjUOgWK3aEtzyuh2mPt3l/CkeU=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1291,8 +1292,8 @@ google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3
google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g=
google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
google.golang.org/api v0.91.0 h1:731+JzuwaJoZXRQGmPoBiV+SrsAfUaIkdMCWTcQNPyA=
google.golang.org/api v0.91.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
google.golang.org/api v0.92.0 h1:8JHk7q/+rJla+iRsWj9FQ9/wjv2M1SKtpKSdmLhxPT0=
google.golang.org/api v0.92.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -1386,8 +1387,8 @@ google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljW
google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220805133916-01dd62135a58 h1:sRT5xdTkj1Kbk30qbYC7VyMj73N5pZYsw6v+Nrzdhno=
google.golang.org/genproto v0.0.0-20220805133916-01dd62135a58/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc=
google.golang.org/genproto v0.0.0-20220812140447-cec7f5303424 h1:zZnTt15U44/Txe/9cN/tVbteBkPMiyXK48hPsKRmqj4=
google.golang.org/genproto v0.0.0-20220812140447-cec7f5303424/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=

View file

@ -1,3 +1,3 @@
{
"storage": "1.24.0"
"storage": "1.25.0"
}

vendor/cloud.google.com/go/storage/CHANGES.md

@ -1,6 +1,14 @@
# Changes
## [1.25.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.24.0...storage/v1.25.0) (2022-08-11)
### Features
* **storage/internal:** Add routing annotations ([8a8ba85](https://github.com/googleapis/google-cloud-go/commit/8a8ba85311f85701c97fd7c10f1d88b738ce423f))
* **storage:** refactor to use transport-agnostic interface ([#6465](https://github.com/googleapis/google-cloud-go/issues/6465)) ([d03c3e1](https://github.com/googleapis/google-cloud-go/commit/d03c3e15a79fe9afa1232d9c8bd4c484a9bb927e))
## [1.24.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.23.0...storage/v1.24.0) (2022-07-20)

vendor/cloud.google.com/go/storage/acl.go

@ -21,7 +21,6 @@ import (
"cloud.google.com/go/internal/trace"
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
"google.golang.org/api/googleapi"
raw "google.golang.org/api/storage/v1"
)
@ -121,111 +120,46 @@ func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) {
}
func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
var acls *raw.ObjectAccessControls
var err error
req := a.c.raw.DefaultObjectAccessControls.List(a.bucket)
a.configureCall(ctx, req)
err = run(ctx, func() error {
acls, err = req.Do()
return err
}, a.retry, true, setRetryHeaderHTTP(req))
if err != nil {
return nil, err
}
return toObjectACLRules(acls.Items), nil
opts := makeStorageOpts(true, a.retry, a.userProject)
return a.c.tc.ListDefaultObjectACLs(ctx, a.bucket, opts...)
}
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity))
a.configureCall(ctx, req)
return run(ctx, func() error {
return req.Do()
}, a.retry, false, setRetryHeaderHTTP(req))
opts := makeStorageOpts(false, a.retry, a.userProject)
return a.c.tc.DeleteDefaultObjectACL(ctx, a.bucket, entity, opts...)
}
func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
var acls *raw.BucketAccessControls
var err error
req := a.c.raw.BucketAccessControls.List(a.bucket)
a.configureCall(ctx, req)
err = run(ctx, func() error {
acls, err = req.Do()
return err
}, a.retry, true, setRetryHeaderHTTP(req))
if err != nil {
return nil, err
}
return toBucketACLRules(acls.Items), nil
opts := makeStorageOpts(true, a.retry, a.userProject)
return a.c.tc.ListBucketACLs(ctx, a.bucket, opts...)
}
func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
acl := &raw.BucketAccessControl{
Bucket: a.bucket,
Entity: string(entity),
Role: string(role),
}
req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl)
a.configureCall(ctx, req)
return run(ctx, func() error {
_, err := req.Do()
return err
}, a.retry, false, setRetryHeaderHTTP(req))
opts := makeStorageOpts(false, a.retry, a.userProject)
return a.c.tc.UpdateBucketACL(ctx, a.bucket, entity, role, opts...)
}
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity))
a.configureCall(ctx, req)
return run(ctx, func() error {
return req.Do()
}, a.retry, false, setRetryHeaderHTTP(req))
opts := makeStorageOpts(false, a.retry, a.userProject)
return a.c.tc.DeleteBucketACL(ctx, a.bucket, entity, opts...)
}
func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
var acls *raw.ObjectAccessControls
var err error
req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object)
a.configureCall(ctx, req)
err = run(ctx, func() error {
acls, err = req.Do()
return err
}, a.retry, true, setRetryHeaderHTTP(req))
if err != nil {
return nil, err
}
return toObjectACLRules(acls.Items), nil
opts := makeStorageOpts(true, a.retry, a.userProject)
return a.c.tc.ListObjectACLs(ctx, a.bucket, a.object, opts...)
}
func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error {
type setRequest interface {
Do(opts ...googleapi.CallOption) (*raw.ObjectAccessControl, error)
Header() http.Header
}
acl := &raw.ObjectAccessControl{
Bucket: a.bucket,
Entity: string(entity),
Role: string(role),
}
var req setRequest
opts := makeStorageOpts(false, a.retry, a.userProject)
if isBucketDefault {
req = a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl)
} else {
req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl)
return a.c.tc.UpdateDefaultObjectACL(ctx, a.bucket, entity, role, opts...)
}
a.configureCall(ctx, req)
return run(ctx, func() error {
_, err := req.Do()
return err
}, a.retry, false, setRetryHeaderHTTP(req))
return a.c.tc.UpdateObjectACL(ctx, a.bucket, a.object, entity, role, opts...)
}
func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity))
a.configureCall(ctx, req)
return run(ctx, func() error {
return req.Do()
}, a.retry, false, setRetryHeaderHTTP(req))
opts := makeStorageOpts(false, a.retry, a.userProject)
return a.c.tc.DeleteObjectACL(ctx, a.bucket, a.object, entity, opts...)
}
func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) {

vendor/cloud.google.com/go/storage/bucket.go

@ -20,7 +20,6 @@ import (
"encoding/json"
"errors"
"fmt"
"net/http"
"reflect"
"time"
@ -55,7 +54,8 @@ type BucketHandle struct {
// The supplied name must contain only lowercase letters, numbers, dashes,
// underscores, and dots. The full specification for valid bucket names can be
// found at:
// https://cloud.google.com/storage/docs/bucket-naming
//
// https://cloud.google.com/storage/docs/bucket-naming
func (c *Client) Bucket(name string) *BucketHandle {
retry := c.retry.clone()
return &BucketHandle{
@ -82,27 +82,11 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create")
defer func() { trace.EndSpan(ctx, err) }()
var bkt *raw.Bucket
if attrs != nil {
bkt = attrs.toRawBucket()
} else {
bkt = &raw.Bucket{}
o := makeStorageOpts(true, b.retry, b.userProject)
if _, err := b.c.tc.CreateBucket(ctx, projectID, b.name, attrs, o...); err != nil {
return err
}
bkt.Name = b.name
// If there is lifecycle information but no location, explicitly set
// the location. This is a GCS quirk/bug.
if bkt.Location == "" && bkt.Lifecycle != nil {
bkt.Location = "US"
}
req := b.c.raw.Buckets.Insert(projectID, bkt)
setClientHeader(req.Header())
if attrs != nil && attrs.PredefinedACL != "" {
req.PredefinedAcl(attrs.PredefinedACL)
}
if attrs != nil && attrs.PredefinedDefaultObjectACL != "" {
req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL)
}
return run(ctx, func() error { _, err := req.Context(ctx).Do(); return err }, b.retry, true, setRetryHeaderHTTP(req))
return nil
}
// Delete deletes the Bucket.
@ -110,24 +94,8 @@ func (b *BucketHandle) Delete(ctx context.Context) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Delete")
defer func() { trace.EndSpan(ctx, err) }()
req, err := b.newDeleteCall()
if err != nil {
return err
}
return run(ctx, func() error { return req.Context(ctx).Do() }, b.retry, true, setRetryHeaderHTTP(req))
}
func (b *BucketHandle) newDeleteCall() (*raw.BucketsDeleteCall, error) {
req := b.c.raw.Buckets.Delete(b.name)
setClientHeader(req.Header())
if err := applyBucketConds("BucketHandle.Delete", b.conds, req); err != nil {
return nil, err
}
if b.userProject != "" {
req.UserProject(b.userProject)
}
return req, nil
o := makeStorageOpts(true, b.retry, b.userProject)
return b.c.tc.DeleteBucket(ctx, b.name, b.conds, o...)
}
// ACL returns an ACLHandle, which provides access to the bucket's access control list.
@ -150,7 +118,8 @@ func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
//
// name must consist entirely of valid UTF-8-encoded runes. The full specification
// for valid object names can be found at:
// https://cloud.google.com/storage/docs/naming-objects
//
// https://cloud.google.com/storage/docs/naming-objects
func (b *BucketHandle) Object(name string) *ObjectHandle {
retry := b.retry.clone()
return &ObjectHandle{
@ -175,35 +144,8 @@ func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs")
defer func() { trace.EndSpan(ctx, err) }()
req, err := b.newGetCall()
if err != nil {
return nil, err
}
var resp *raw.Bucket
err = run(ctx, func() error {
resp, err = req.Context(ctx).Do()
return err
}, b.retry, true, setRetryHeaderHTTP(req))
var e *googleapi.Error
if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
return nil, ErrBucketNotExist
}
if err != nil {
return nil, err
}
return newBucket(resp)
}
func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) {
req := b.c.raw.Buckets.Get(b.name).Projection("full")
setClientHeader(req.Header())
if err := applyBucketConds("BucketHandle.Attrs", b.conds, req); err != nil {
return nil, err
}
if b.userProject != "" {
req.UserProject(b.userProject)
}
return req, nil
o := makeStorageOpts(true, b.retry, b.userProject)
return b.c.tc.GetBucket(ctx, b.name, b.conds, o...)
}
// Update updates a bucket's attributes.
@ -211,43 +153,9 @@ func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create")
defer func() { trace.EndSpan(ctx, err) }()
req, err := b.newPatchCall(&uattrs)
if err != nil {
return nil, err
}
if uattrs.PredefinedACL != "" {
req.PredefinedAcl(uattrs.PredefinedACL)
}
if uattrs.PredefinedDefaultObjectACL != "" {
req.PredefinedDefaultObjectAcl(uattrs.PredefinedDefaultObjectACL)
}
isIdempotent := b.conds != nil && b.conds.MetagenerationMatch != 0
var rawBucket *raw.Bucket
call := func() error {
rb, err := req.Context(ctx).Do()
rawBucket = rb
return err
}
if err := run(ctx, call, b.retry, isIdempotent, setRetryHeaderHTTP(req)); err != nil {
return nil, err
}
return newBucket(rawBucket)
}
func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) {
rb := uattrs.toRawBucket()
req := b.c.raw.Buckets.Patch(b.name, rb).Projection("full")
setClientHeader(req.Header())
if err := applyBucketConds("BucketHandle.Update", b.conds, req); err != nil {
return nil, err
}
if b.userProject != "" {
req.UserProject(b.userProject)
}
return req, nil
o := makeStorageOpts(isIdempotent, b.retry, b.userProject)
return b.c.tc.UpdateBucket(ctx, b.name, &uattrs, b.conds, o...)
}
// SignedURL returns a URL for the specified object. Signed URLs allow anyone
@ -1364,15 +1272,8 @@ func (b *BucketHandle) UserProject(projectID string) *BucketHandle {
// most customers. It might be changed in backwards-incompatible ways and is not
// subject to any SLA or deprecation policy.
func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error {
var metageneration int64
if b.conds != nil {
metageneration = b.conds.MetagenerationMatch
}
req := b.c.raw.Buckets.LockRetentionPolicy(b.name, metageneration)
return run(ctx, func() error {
_, err := req.Context(ctx).Do()
return err
}, b.retry, true, setRetryHeaderHTTP(req))
o := makeStorageOpts(true, b.retry, b.userProject)
return b.c.tc.LockBucketRetentionPolicy(ctx, b.name, b.conds, o...)
}
// applyBucketConds modifies the provided call using the conditions in conds.
@ -1988,18 +1889,8 @@ func customPlacementFromProto(c *storagepb.Bucket_CustomPlacementConfig) *Custom
//
// Note: The returned iterator is not safe for concurrent operations without explicit synchronization.
func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {
it := &ObjectIterator{
ctx: ctx,
bucket: b,
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch,
func() int { return len(it.items) },
func() interface{} { b := it.items; it.items = nil; return b })
if q != nil {
it.query = *q
}
return it
o := makeStorageOpts(true, b.retry, b.userProject)
return b.c.tc.ListObjects(ctx, b.name, q, o...)
}
// Retryer returns a bucket handle that is configured with custom retry
@ -2034,7 +1925,6 @@ func (b *BucketHandle) Retryer(opts ...RetryOption) *BucketHandle {
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
type ObjectIterator struct {
ctx context.Context
bucket *BucketHandle
query Query
pageInfo *iterator.PageInfo
nextFunc func() error
@ -2071,52 +1961,6 @@ func (it *ObjectIterator) Next() (*ObjectAttrs, error) {
return item, nil
}
func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) {
req := it.bucket.c.raw.Objects.List(it.bucket.name)
setClientHeader(req.Header())
projection := it.query.Projection
if projection == ProjectionDefault {
projection = ProjectionFull
}
req.Projection(projection.String())
req.Delimiter(it.query.Delimiter)
req.Prefix(it.query.Prefix)
req.StartOffset(it.query.StartOffset)
req.EndOffset(it.query.EndOffset)
req.Versions(it.query.Versions)
req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter)
if len(it.query.fieldSelection) > 0 {
req.Fields("nextPageToken", googleapi.Field(it.query.fieldSelection))
}
req.PageToken(pageToken)
if it.bucket.userProject != "" {
req.UserProject(it.bucket.userProject)
}
if pageSize > 0 {
req.MaxResults(int64(pageSize))
}
var resp *raw.Objects
var err error
err = run(it.ctx, func() error {
resp, err = req.Context(it.ctx).Do()
return err
}, it.bucket.retry, true, setRetryHeaderHTTP(req))
if err != nil {
var e *googleapi.Error
if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
err = ErrBucketNotExist
}
return "", err
}
for _, item := range resp.Items {
it.items = append(it.items, newObject(item))
}
for _, prefix := range resp.Prefixes {
it.items = append(it.items, &ObjectAttrs{Prefix: prefix})
}
return resp.NextPageToken, nil
}
// Buckets returns an iterator over the buckets in the project. You may
// optionally set the iterator's Prefix field to restrict the list to buckets
// whose names begin with the prefix. By default, all buckets in the project
@ -2124,17 +1968,8 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error)
//
// Note: The returned iterator is not safe for concurrent operations without explicit synchronization.
func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator {
it := &BucketIterator{
ctx: ctx,
client: c,
projectID: projectID,
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch,
func() int { return len(it.buckets) },
func() interface{} { b := it.buckets; it.buckets = nil; return b })
return it
o := makeStorageOpts(true, c.retry, "")
return c.tc.ListBuckets(ctx, projectID, o...)
}
// A BucketIterator is an iterator over BucketAttrs.
@ -2145,7 +1980,6 @@ type BucketIterator struct {
Prefix string
ctx context.Context
client *Client
projectID string
buckets []*BucketAttrs
pageInfo *iterator.PageInfo
@ -2171,36 +2005,6 @@ func (it *BucketIterator) Next() (*BucketAttrs, error) {
// Note: This method is not safe for concurrent operations without explicit synchronization.
func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
// TODO: When the transport-agnostic client interface is integrated into the Veneer,
// this method should be removed, and the iterator should be initialized by the
// transport-specific client implementations.
func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, err error) {
req := it.client.raw.Buckets.List(it.projectID)
setClientHeader(req.Header())
req.Projection("full")
req.Prefix(it.Prefix)
req.PageToken(pageToken)
if pageSize > 0 {
req.MaxResults(int64(pageSize))
}
var resp *raw.Buckets
err = run(it.ctx, func() error {
resp, err = req.Context(it.ctx).Do()
return err
}, it.client.retry, true, setRetryHeaderHTTP(req))
if err != nil {
return "", err
}
for _, item := range resp.Items {
b, err := newBucket(item)
if err != nil {
return "", err
}
it.buckets = append(it.buckets, b)
}
return resp.NextPageToken, nil
}
// RPO (Recovery Point Objective) configures the turbo replication feature. See
// https://cloud.google.com/storage/docs/managing-turbo-replication for more information.
type RPO int

vendor/cloud.google.com/go/storage/client.go

@ -44,7 +44,7 @@ type storageClient interface {
// Top-level methods.
GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error)
CreateBucket(ctx context.Context, project string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error)
CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error)
ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator
Close() error
@ -162,6 +162,20 @@ func callSettings(defaults *settings, opts ...storageOption) *settings {
return &cs
}
// makeStorageOpts is a helper for generating a set of storageOption based on
// idempotency, retryConfig, and userProject. All top-level client operations
// will generally have to pass these options through the interface.
func makeStorageOpts(isIdempotent bool, retry *retryConfig, userProject string) []storageOption {
opts := []storageOption{idempotent(isIdempotent)}
if retry != nil {
opts = append(opts, withRetryConfig(retry))
}
if userProject != "" {
opts = append(opts, withUserProject(userProject))
}
return opts
}
// storageOption is the transport-agnostic call option for the storageClient
// interface.
type storageOption interface {
@ -267,13 +281,14 @@ type openWriterParams struct {
}
type newRangeReaderParams struct {
bucket string
conds *Conditions
encryptionKey []byte
gen int64
length int64
object string
offset int64
bucket string
conds *Conditions
encryptionKey []byte
gen int64
length int64
object string
offset int64
readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently.
}
type composeObjectRequest struct {
@ -281,7 +296,6 @@ type composeObjectRequest struct {
dstObject destinationObject
srcs []sourceObject
predefinedACL string
encryptionKey []byte
sendCRC32C bool
}
@ -313,5 +327,6 @@ type rewriteObjectResponse struct {
resource *ObjectAttrs
done bool
written int64
size int64
token string
}
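For orientation: the idempotency flag, retry configuration, and user project that makeStorageOpts bundles correspond to the package's public knobs, BucketHandle.Retryer and BucketHandle.UserProject, both of which appear elsewhere in this diff. A minimal usage sketch follows; it assumes a ctx and client built as in the doc.go examples later in this commit, the time and fmt imports, and gax as github.com/googleapis/gax-go/v2 — the RetryOption constructors (storage.WithBackoff, storage.WithPolicy, storage.RetryAlways) are part of the package's public API but are not shown in this diff.

// Both knobs feed the retry configuration and user project that
// makeStorageOpts forwards to the transport-agnostic client (tc).
bkt := client.Bucket("my-bucket").
	UserProject("my-billing-project").
	Retryer(
		storage.WithBackoff(gax.Backoff{Initial: time.Second, Max: 30 * time.Second}),
		storage.WithPolicy(storage.RetryAlways),
	)

// Attrs is an idempotent read, so it is retried under the policy above.
attrs, err := bkt.Attrs(ctx)
if err != nil {
	// TODO: Handle error.
}
fmt.Println(attrs.Name, attrs.Location)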

vendor/cloud.google.com/go/storage/copy.go

@ -20,7 +20,6 @@ import (
"fmt"
"cloud.google.com/go/internal/trace"
raw "google.golang.org/api/storage/v1"
)
// CopierFrom creates a Copier that can copy src to dst.
@ -86,69 +85,57 @@ func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil {
return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key")
}
if c.dst.gen != defaultGen {
return nil, fmt.Errorf("storage: generation cannot be specified on copy destination, got %v", c.dst.gen)
}
// Convert destination attributes to raw form, omitting the bucket.
// If the bucket is included but name or content-type aren't, the service
// returns a 400 with "Required" as the only message. Omitting the bucket
// does not cause any problems.
rawObject := c.ObjectAttrs.toRawObject("")
req := &rewriteObjectRequest{
srcObject: sourceObject{
name: c.src.object,
bucket: c.src.bucket,
gen: c.src.gen,
conds: c.src.conds,
encryptionKey: c.src.encryptionKey,
},
dstObject: destinationObject{
name: c.dst.object,
bucket: c.dst.bucket,
conds: c.dst.conds,
attrs: &c.ObjectAttrs,
encryptionKey: c.dst.encryptionKey,
keyName: c.DestinationKMSKeyName,
},
predefinedACL: c.PredefinedACL,
token: c.RewriteToken,
}
isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
var userProject string
if c.dst.userProject != "" {
userProject = c.dst.userProject
} else if c.src.userProject != "" {
userProject = c.src.userProject
}
opts := makeStorageOpts(isIdempotent, c.dst.retry, userProject)
for {
res, err := c.callRewrite(ctx, rawObject)
res, err := c.dst.c.tc.RewriteObject(ctx, req, opts...)
if err != nil {
return nil, err
}
c.RewriteToken = res.token
if c.ProgressFunc != nil {
c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize))
c.ProgressFunc(uint64(res.written), uint64(res.size))
}
if res.Done { // Finished successfully.
return newObject(res.Resource), nil
if res.done { // Finished successfully.
return res.resource, nil
}
}
}
func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.RewriteResponse, error) {
call := c.dst.c.raw.Objects.Rewrite(c.src.bucket, c.src.object, c.dst.bucket, c.dst.object, rawObj)
call.Context(ctx).Projection("full")
if c.RewriteToken != "" {
call.RewriteToken(c.RewriteToken)
}
if c.DestinationKMSKeyName != "" {
call.DestinationKmsKeyName(c.DestinationKMSKeyName)
}
if c.PredefinedACL != "" {
call.DestinationPredefinedAcl(c.PredefinedACL)
}
if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
return nil, err
}
if c.dst.userProject != "" {
call.UserProject(c.dst.userProject)
} else if c.src.userProject != "" {
call.UserProject(c.src.userProject)
}
if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil {
return nil, err
}
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
return nil, err
}
if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil {
return nil, err
}
var res *raw.RewriteResponse
var err error
setClientHeader(call.Header())
retryCall := func() error { res, err = call.Do(); return err }
isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
if err := run(ctx, retryCall, c.dst.retry, isIdempotent, setRetryHeaderHTTP(call)); err != nil {
return nil, err
}
c.RewriteToken = res.RewriteToken
return res, nil
}
// ComposerFrom creates a Composer that can compose srcs into dst.
// You can immediately call Run on the returned Composer, or you can
// configure it first.
@ -188,6 +175,9 @@ func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
if err := c.dst.validate(); err != nil {
return nil, err
}
if c.dst.gen != defaultGen {
return nil, fmt.Errorf("storage: generation cannot be specified on compose destination, got %v", c.dst.gen)
}
if len(c.srcs) == 0 {
return nil, errors.New("storage: at least one source object must be specified")
}
@ -204,45 +194,29 @@ func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
}
}
// TODO: transport agnostic interface starts here.
req := &raw.ComposeRequest{}
// Compose requires a non-empty Destination, so we always set it,
// even if the caller-provided ObjectAttrs is the zero value.
req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket)
if c.SendCRC32C {
req.Destination.Crc32c = encodeUint32(c.ObjectAttrs.CRC32C)
req := &composeObjectRequest{
dstBucket: c.dst.bucket,
predefinedACL: c.PredefinedACL,
sendCRC32C: c.SendCRC32C,
}
req.dstObject = destinationObject{
name: c.dst.object,
bucket: c.dst.bucket,
conds: c.dst.conds,
attrs: &c.ObjectAttrs,
encryptionKey: c.dst.encryptionKey,
}
for _, src := range c.srcs {
srcObj := &raw.ComposeRequestSourceObjects{
Name: src.object,
s := sourceObject{
name: src.object,
bucket: src.bucket,
gen: src.gen,
conds: src.conds,
}
if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil {
return nil, err
}
req.SourceObjects = append(req.SourceObjects, srcObj)
req.srcs = append(req.srcs, s)
}
call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx)
if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil {
return nil, err
}
if c.dst.userProject != "" {
call.UserProject(c.dst.userProject)
}
if c.PredefinedACL != "" {
call.DestinationPredefinedAcl(c.PredefinedACL)
}
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
return nil, err
}
var obj *raw.Object
setClientHeader(call.Header())
retryCall := func() error { obj, err = call.Do(); return err }
isIdempotent := c.dst.conds != nil && (c.dst.conds.GenerationMatch != 0 || c.dst.conds.DoesNotExist)
if err := run(ctx, retryCall, c.dst.retry, isIdempotent, setRetryHeaderHTTP(call)); err != nil {
return nil, err
}
return newObject(obj), nil
opts := makeStorageOpts(isIdempotent, c.dst.retry, c.dst.userProject)
return c.dst.c.tc.ComposeObject(ctx, req, opts...)
}
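The rewrite loop in Copier.Run above reports progress through the ProgressFunc callback using the written and size fields of each rewrite response. A short usage sketch, assuming ctx and client as in the doc.go examples later in this commit, the log import, and placeholder bucket and object names:

src := client.Bucket("src-bucket").Object("large-object")
dst := client.Bucket("dst-bucket").Object("large-object-copy")

copier := dst.CopierFrom(src)
// Invoked after each rewrite round trip with the response's written and size values.
copier.ProgressFunc = func(copiedBytes, totalBytes uint64) {
	log.Printf("copied %d of %d bytes", copiedBytes, totalBytes)
}
attrs, err := copier.Run(ctx)
if err != nil {
	// TODO: Handle error.
}
log.Printf("done: %s (%d bytes)", attrs.Name, attrs.Size)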

vendor/cloud.google.com/go/storage/doc.go

@ -22,16 +22,15 @@ https://cloud.google.com/storage/docs.
See https://pkg.go.dev/cloud.google.com/go for authentication, timeouts,
connection pooling and similar aspects of this package.
Creating a Client
# Creating a Client
To start working with this package, create a client:
ctx := context.Background()
client, err := storage.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
ctx := context.Background()
client, err := storage.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
The client will use your default application credentials. Clients should be
reused instead of created as needed. The methods of Client are safe for
@ -40,47 +39,47 @@ concurrent use by multiple goroutines.
If you only wish to access public data, you can create
an unauthenticated client with
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
To use an emulator with this library, you can set the STORAGE_EMULATOR_HOST
environment variable to the address at which your emulator is running. This will
send requests to that address instead of to Cloud Storage. You can then create
and use a client as usual:
// Set STORAGE_EMULATOR_HOST environment variable.
err := os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000")
if err != nil {
// TODO: Handle error.
}
// Set STORAGE_EMULATOR_HOST environment variable.
err := os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9000")
if err != nil {
// TODO: Handle error.
}
// Create client as usual.
client, err := storage.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
// Create client as usual.
client, err := storage.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
// This request is now directed to http://localhost:9000/storage/v1/b
// instead of https://storage.googleapis.com/storage/v1/b
if err := client.Bucket("my-bucket").Create(ctx, projectID, nil); err != nil {
// TODO: Handle error.
}
// This request is now directed to http://localhost:9000/storage/v1/b
// instead of https://storage.googleapis.com/storage/v1/b
if err := client.Bucket("my-bucket").Create(ctx, projectID, nil); err != nil {
// TODO: Handle error.
}
Please note that there is no official emulator for Cloud Storage.
Buckets
# Buckets
A Google Cloud Storage bucket is a collection of objects. To work with a
bucket, make a bucket handle:
bkt := client.Bucket(bucketName)
bkt := client.Bucket(bucketName)
A handle is a reference to a bucket. You can have a handle even if the
bucket doesn't exist yet. To create a bucket in Google Cloud Storage,
call Create on the handle:
if err := bkt.Create(ctx, projectID, nil); err != nil {
// TODO: Handle error.
}
if err := bkt.Create(ctx, projectID, nil); err != nil {
// TODO: Handle error.
}
Note that although buckets are associated with projects, bucket names are
global across all projects.
@ -90,14 +89,14 @@ BucketAttrs. The third argument to BucketHandle.Create allows you to set
the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
Attrs:
attrs, err := bkt.Attrs(ctx)
if err != nil {
// TODO: Handle error.
}
fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)
attrs, err := bkt.Attrs(ctx)
if err != nil {
// TODO: Handle error.
}
fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n",
attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass)
Objects
# Objects
An object holds arbitrary data as a sequence of bytes, like a file. You
refer to objects using a handle, just as with buckets, but unlike buckets
@ -105,78 +104,78 @@ you don't explicitly create an object. Instead, the first time you write
to an object it will be created. You can use the standard Go io.Reader
and io.Writer interfaces to read and write object data:
obj := bkt.Object("data")
// Write something to obj.
// w implements io.Writer.
w := obj.NewWriter(ctx)
// Write some text to obj. This will either create the object or overwrite whatever is there already.
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
// TODO: Handle error.
}
// Close, just like writing a file.
if err := w.Close(); err != nil {
// TODO: Handle error.
}
obj := bkt.Object("data")
// Write something to obj.
// w implements io.Writer.
w := obj.NewWriter(ctx)
// Write some text to obj. This will either create the object or overwrite whatever is there already.
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
// TODO: Handle error.
}
// Close, just like writing a file.
if err := w.Close(); err != nil {
// TODO: Handle error.
}
// Read it back.
r, err := obj.NewReader(ctx)
if err != nil {
// TODO: Handle error.
}
defer r.Close()
if _, err := io.Copy(os.Stdout, r); err != nil {
// TODO: Handle error.
}
// Prints "This object contains text."
// Read it back.
r, err := obj.NewReader(ctx)
if err != nil {
// TODO: Handle error.
}
defer r.Close()
if _, err := io.Copy(os.Stdout, r); err != nil {
// TODO: Handle error.
}
// Prints "This object contains text."
Objects also have attributes, which you can fetch with Attrs:
objAttrs, err := obj.Attrs(ctx)
if err != nil {
// TODO: Handle error.
}
fmt.Printf("object %s has size %d and can be read using %s\n",
objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)
objAttrs, err := obj.Attrs(ctx)
if err != nil {
// TODO: Handle error.
}
fmt.Printf("object %s has size %d and can be read using %s\n",
objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)
Listing objects
# Listing objects
Listing objects in a bucket is done with the Bucket.Objects method:
query := &storage.Query{Prefix: ""}
query := &storage.Query{Prefix: ""}
var names []string
it := bkt.Objects(ctx, query)
for {
attrs, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
log.Fatal(err)
}
names = append(names, attrs.Name)
}
var names []string
it := bkt.Objects(ctx, query)
for {
attrs, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
log.Fatal(err)
}
names = append(names, attrs.Name)
}
Objects are listed lexicographically by name. To filter objects
lexicographically, Query.StartOffset and/or Query.EndOffset can be used:
query := &storage.Query{
Prefix: "",
StartOffset: "bar/", // Only list objects lexicographically >= "bar/"
EndOffset: "foo/", // Only list objects lexicographically < "foo/"
}
query := &storage.Query{
Prefix: "",
StartOffset: "bar/", // Only list objects lexicographically >= "bar/"
EndOffset: "foo/", // Only list objects lexicographically < "foo/"
}
// ... as before
// ... as before
If only a subset of object attributes is needed when listing, specifying this
subset using Query.SetAttrSelection may speed up the listing process:
query := &storage.Query{Prefix: ""}
query.SetAttrSelection([]string{"Name"})
query := &storage.Query{Prefix: ""}
query.SetAttrSelection([]string{"Name"})
// ... as before
// ... as before
ACLs
# ACLs
Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of
ACLRules, each of which specifies the role of a user, group or project. ACLs
@ -186,17 +185,17 @@ https://cloud.google.com/storage/docs/access-control/iam).
To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method:
acls, err := obj.ACL().List(ctx)
if err != nil {
// TODO: Handle error.
}
for _, rule := range acls {
fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
}
acls, err := obj.ACL().List(ctx)
if err != nil {
// TODO: Handle error.
}
for _, rule := range acls {
fmt.Printf("%s has role %s\n", rule.Entity, rule.Role)
}
You can also set and delete ACLs.
Conditions
# Conditions
Every object has a generation and a metageneration. The generation changes
whenever the content changes, and the metageneration changes whenever the
@ -208,32 +207,32 @@ For example, say you've read an object's metadata into objAttrs. Now
you want to write to that object, but only if its contents haven't changed
since you read it. Here is how to express that:
w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
// Proceed with writing as above.
w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)
// Proceed with writing as above.
Signed URLs
# Signed URLs
You can obtain a URL that lets anyone read or write an object for a limited time.
Signing a URL requires credentials authorized to sign a URL. To use the same
authentication that was used when instantiating the Storage client, use the
BucketHandle.SignedURL method.
url, err := client.Bucket(bucketName).SignedURL(objectName, opts)
if err != nil {
// TODO: Handle error.
}
fmt.Println(url)
url, err := client.Bucket(bucketName).SignedURL(objectName, opts)
if err != nil {
// TODO: Handle error.
}
fmt.Println(url)
You can also sign a URL without creating a client. See the documentation of
SignedURL for details.
url, err := storage.SignedURL(bucketName, "shared-object", opts)
if err != nil {
// TODO: Handle error.
}
fmt.Println(url)
url, err := storage.SignedURL(bucketName, "shared-object", opts)
if err != nil {
// TODO: Handle error.
}
fmt.Println(url)
Post Policy V4 Signed Request
# Post Policy V4 Signed Request
A type of signed request that allows uploads through HTML forms directly to Cloud Storage with
temporary permission. Conditions can be applied to restrict how the HTML form is used and exercised
@ -242,13 +241,13 @@ by a user.
For more information, please see https://cloud.google.com/storage/docs/xml-api/post-object as well
as the documentation of BucketHandle.GenerateSignedPostPolicyV4.
pv4, err := client.Bucket(bucketName).GenerateSignedPostPolicyV4(objectName, opts)
if err != nil {
// TODO: Handle error.
}
fmt.Printf("URL: %s\nFields; %v\n", pv4.URL, pv4.Fields)
pv4, err := client.Bucket(bucketName).GenerateSignedPostPolicyV4(objectName, opts)
if err != nil {
// TODO: Handle error.
}
fmt.Printf("URL: %s\nFields; %v\n", pv4.URL, pv4.Fields)
Errors
# Errors
Errors returned by this client are often of the type googleapi.Error.
These errors can be introspected for more information by using errors.As
@ -261,7 +260,7 @@ with the richer googleapi.Error type. For example:
See https://pkg.go.dev/google.golang.org/api/googleapi#Error for more information.
Retrying failed requests
# Retrying failed requests
Methods in this package may retry calls that fail with transient errors.
Retrying continues indefinitely unless the controlling context is canceled, the
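The Errors section a few lines above mentions introspecting errors with errors.As and the googleapi.Error type; the concrete example it refers to lies outside this hunk. A minimal sketch of that pattern, assuming the errors, net/http, and google.golang.org/api/googleapi imports and the ctx, client, and projectID variables from the examples above; treating HTTP 409 as "bucket already exists" is an illustrative choice, not something this diff establishes:

if err := client.Bucket("my-bucket").Create(ctx, projectID, nil); err != nil {
	var e *googleapi.Error
	if errors.As(err, &e) && e.Code == http.StatusConflict {
		// HTTP 409: the bucket already exists; decide whether that is
		// an error for your application.
	} else {
		// TODO: Handle error.
	}
}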

vendor/cloud.google.com/go/storage/grpc_client.go

@ -19,6 +19,7 @@ import (
"encoding/base64"
"fmt"
"io"
"net/url"
"os"
"cloud.google.com/go/internal/trace"
@ -44,6 +45,12 @@ const (
// This is only used for the gRPC client.
defaultConnPoolSize = 4
// maxPerMessageWriteSize is the maximum amount of content that can be sent
// per WriteObjectRequest message. A buffer reaching this amount will
// precipitate a flush of the buffer. It is only used by the gRPC Writer
// implementation.
maxPerMessageWriteSize int = int(storagepb.ServiceConstants_MAX_WRITE_CHUNK_BYTES)
// globalProjectAlias is the project ID alias used for global buckets.
//
// This is only used for the gRPC API.
@ -133,10 +140,10 @@ func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project strin
return resp.EmailAddress, err
}
func (c *grpcStorageClient) CreateBucket(ctx context.Context, project string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) {
func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) {
s := callSettings(c.settings, opts...)
b := attrs.toProtoBucket()
b.Name = bucket
// If there is lifecycle information but no location, explicitly set
// the location. This is a GCS quirk/bug.
if b.GetLocation() == "" && b.GetLifecycle() != nil {
@ -727,7 +734,7 @@ func (c *grpcStorageClient) ComposeObject(ctx context.Context, req *composeObjec
dstObjPb := req.dstObject.attrs.toProtoObject(req.dstBucket)
dstObjPb.Name = req.dstObject.name
if err := applyCondsProto("ComposeObject destination", -1, req.dstObject.conds, dstObjPb); err != nil {
if err := applyCondsProto("ComposeObject destination", defaultGen, req.dstObject.conds, dstObjPb); err != nil {
return nil, err
}
if req.sendCRC32C {
@ -750,8 +757,8 @@ func (c *grpcStorageClient) ComposeObject(ctx context.Context, req *composeObjec
if req.predefinedACL != "" {
rawReq.DestinationPredefinedAcl = req.predefinedACL
}
if req.encryptionKey != nil {
rawReq.CommonObjectRequestParams = toProtoCommonObjectRequestParams(req.encryptionKey)
if req.dstObject.encryptionKey != nil {
rawReq.CommonObjectRequestParams = toProtoCommonObjectRequestParams(req.dstObject.encryptionKey)
}
var obj *storagepb.Object
@ -811,6 +818,7 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
r := &rewriteObjectResponse{
done: res.GetDone(),
written: res.GetTotalBytesRewritten(),
size: res.GetObjectSize(),
token: res.GetRewriteToken(),
resource: newObjectFromProto(res.GetResource()),
}
@ -822,14 +830,12 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewRangeReader")
defer func() { trace.EndSpan(ctx, err) }()
if params.conds != nil {
if err := params.conds.validate("grpcStorageClient.NewRangeReader"); err != nil {
return nil, err
}
}
s := callSettings(c.settings, opts...)
if s.userProject != "" {
ctx = setUserProjectMetadata(ctx, s.userProject)
}
// A negative length means "read to the end of the object", but the
// read_limit field it corresponds to uses zero to mean the same thing. Thus
// we coerce the length to 0 to read to the end of the object.
@ -1513,7 +1519,8 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
// The first message on the WriteObject stream must either be the
// Object or the Resumable Upload ID.
if first {
w.stream, err = w.c.raw.WriteObject(w.ctx)
ctx := gapic.InsertMetadata(w.ctx, metadata.Pairs("x-goog-request-params", "bucket="+url.QueryEscape(w.bucket)))
w.stream, err = w.c.raw.WriteObject(ctx)
if err != nil {
return nil, 0, false, err
}
@ -1646,8 +1653,8 @@ func (w *gRPCWriter) writeObjectSpec() (*storagepb.WriteObjectSpec, error) {
spec := &storagepb.WriteObjectSpec{
Resource: attrs.toProtoObject(w.bucket),
}
// WriteObject doesn't support the generation condition, so use -1.
if err := applyCondsProto("WriteObject", -1, w.conds, spec); err != nil {
// WriteObject doesn't support the generation condition, so use default.
if err := applyCondsProto("WriteObject", defaultGen, w.conds, spec); err != nil {
return nil, err
}
return spec, nil

vendor/cloud.google.com/go/storage/hmac.go

@ -91,7 +91,7 @@ type HMACKeyHandle struct {
projectID string
accessID string
retry *retryConfig
raw *raw.ProjectsHmacKeysService
tc storageClient
}
// HMACKeyHandle creates a handle that will be used for HMACKey operations.
@ -102,7 +102,7 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle {
projectID: projectID,
accessID: accessID,
retry: c.retry,
raw: raw.NewProjectsHmacKeysService(c.raw),
tc: c.tc,
}
}
@ -114,32 +114,15 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle {
//
// This method is EXPERIMENTAL and subject to change or removal without notice.
func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) {
call := hkh.raw.Get(hkh.projectID, hkh.accessID)
desc := new(hmacKeyDesc)
for _, opt := range opts {
opt.withHMACKeyDesc(desc)
}
if desc.userProjectID != "" {
call = call.UserProject(desc.userProjectID)
}
setClientHeader(call.Header())
o := makeStorageOpts(true, hkh.retry, desc.userProjectID)
hk, err := hkh.tc.GetHMACKey(ctx, hkh.projectID, hkh.accessID, o...)
var metadata *raw.HmacKeyMetadata
var err error
err = run(ctx, func() error {
metadata, err = call.Context(ctx).Do()
return err
}, hkh.retry, true, setRetryHeaderHTTP(call))
if err != nil {
return nil, err
}
hk := &raw.HmacKey{
Metadata: metadata,
}
return toHMACKeyFromRaw(hk, false)
return hk, err
}
// Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage.
@ -148,19 +131,13 @@ func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMAC
//
// This method is EXPERIMENTAL and subject to change or removal without notice.
func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error {
delCall := hkh.raw.Delete(hkh.projectID, hkh.accessID)
desc := new(hmacKeyDesc)
for _, opt := range opts {
opt.withHMACKeyDesc(desc)
}
if desc.userProjectID != "" {
delCall = delCall.UserProject(desc.userProjectID)
}
setClientHeader(delCall.Header())
return run(ctx, func() error {
return delCall.Context(ctx).Do()
}, hkh.retry, true, setRetryHeaderHTTP(delCall))
o := makeStorageOpts(true, hkh.retry, desc.userProjectID)
return hkh.tc.DeleteHMACKey(ctx, hkh.projectID, hkh.accessID, o...)
}
func toHMACKeyFromRaw(hk *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) {
@ -220,29 +197,14 @@ func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEma
return nil, errors.New("storage: expecting a non-blank service account email")
}
svc := raw.NewProjectsHmacKeysService(c.raw)
call := svc.Create(projectID, serviceAccountEmail)
desc := new(hmacKeyDesc)
for _, opt := range opts {
opt.withHMACKeyDesc(desc)
}
if desc.userProjectID != "" {
call = call.UserProject(desc.userProjectID)
}
setClientHeader(call.Header())
var hk *raw.HmacKey
if err := run(ctx, func() error {
h, err := call.Context(ctx).Do()
hk = h
return err
}, c.retry, false, setRetryHeaderHTTP(call)); err != nil {
return nil, err
}
return toHMACKeyFromRaw(hk, true)
o := makeStorageOpts(false, c.retry, desc.userProjectID)
hk, err := c.tc.CreateHMACKey(ctx, projectID, serviceAccountEmail, o...)
return hk, err
}
// HMACKeyAttrsToUpdate defines the attributes of an HMACKey that will be updated.
@ -264,35 +226,15 @@ func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opt
return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive)
}
call := h.raw.Update(h.projectID, h.accessID, &raw.HmacKeyMetadata{
Etag: au.Etag,
State: string(au.State),
})
desc := new(hmacKeyDesc)
for _, opt := range opts {
opt.withHMACKeyDesc(desc)
}
if desc.userProjectID != "" {
call = call.UserProject(desc.userProjectID)
}
setClientHeader(call.Header())
var metadata *raw.HmacKeyMetadata
var err error
isIdempotent := len(au.Etag) > 0
err = run(ctx, func() error {
metadata, err = call.Context(ctx).Do()
return err
}, h.retry, isIdempotent, setRetryHeaderHTTP(call))
if err != nil {
return nil, err
}
hk := &raw.HmacKey{
Metadata: metadata,
}
return toHMACKeyFromRaw(hk, false)
o := makeStorageOpts(isIdempotent, h.retry, desc.userProjectID)
hk, err := h.tc.UpdateHMACKey(ctx, h.projectID, desc.forServiceAccountEmail, h.accessID, &au, o...)
return hk, err
}
// An HMACKeysIterator is an iterator over HMACKeys.
@ -318,27 +260,13 @@ type HMACKeysIterator struct {
//
// This method is EXPERIMENTAL and subject to change or removal without notice.
func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator {
it := &HMACKeysIterator{
ctx: ctx,
raw: raw.NewProjectsHmacKeysService(c.raw),
projectID: projectID,
retry: c.retry,
}
desc := new(hmacKeyDesc)
for _, opt := range opts {
opt.withHMACKeyDesc(&it.desc)
opt.withHMACKeyDesc(desc)
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch,
func() int { return len(it.hmacKeys) - it.index },
func() interface{} {
prev := it.hmacKeys
it.hmacKeys = it.hmacKeys[:0]
it.index = 0
return prev
})
return it
o := makeStorageOpts(true, c.retry, desc.userProjectID)
return c.tc.ListHMACKeys(ctx, projectID, desc.forServiceAccountEmail, desc.showDeletedKeys, o...)
}
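// A minimal iteration sketch for ListHMACKeys, assuming an existing
// *storage.Client named client, a context ctx, and the
// google.golang.org/api/iterator package; the project ID is a placeholder.
//
//	it := client.ListHMACKeys(ctx, "my-project")
//	for {
//		hk, err := it.Next()
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			// TODO: Handle error.
//		}
//		fmt.Println(hk.AccessID)
//	}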
// Next returns the next result. Its second return value is iterator.Done if

View file

@ -159,7 +159,7 @@ func (c *httpStorageClient) GetServiceAccount(ctx context.Context, project strin
return res.EmailAddress, nil
}
func (c *httpStorageClient) CreateBucket(ctx context.Context, project string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) {
func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) {
s := callSettings(c.settings, opts...)
var bkt *raw.Bucket
if attrs != nil {
@ -167,7 +167,7 @@ func (c *httpStorageClient) CreateBucket(ctx context.Context, project string, at
} else {
bkt = &raw.Bucket{}
}
bkt.Name = bucket
// If there is lifecycle information but no location, explicitly set
// the location. This is a GCS quirk/bug.
if bkt.Location == "" && bkt.Lifecycle != nil {
@ -692,7 +692,7 @@ func (c *httpStorageClient) ComposeObject(ctx context.Context, req *composeObjec
}
call := c.raw.Objects.Compose(req.dstBucket, req.dstObject.name, rawReq).Context(ctx)
if err := applyConds("ComposeFrom destination", -1, req.dstObject.conds, call); err != nil {
if err := applyConds("ComposeFrom destination", defaultGen, req.dstObject.conds, call); err != nil {
return nil, err
}
if s.userProject != "" {
@ -701,7 +701,7 @@ func (c *httpStorageClient) ComposeObject(ctx context.Context, req *composeObjec
if req.predefinedACL != "" {
call.DestinationPredefinedAcl(req.predefinedACL)
}
if err := setEncryptionHeaders(call.Header(), req.encryptionKey, false); err != nil {
if err := setEncryptionHeaders(call.Header(), req.dstObject.encryptionKey, false); err != nil {
return nil, err
}
var obj *raw.Object
@ -760,6 +760,7 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
r := &rewriteObjectResponse{
done: res.Done,
written: res.TotalBytesRewritten,
size: res.ObjectSize,
token: res.RewriteToken,
resource: newObject(res.Resource),
}
@ -773,14 +774,6 @@ func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRange
s := callSettings(c.settings, opts...)
if params.offset < 0 && params.length >= 0 {
return nil, fmt.Errorf("storage: invalid offset %d < 0 requires negative length", params.offset)
}
if params.conds != nil {
if err := params.conds.validate("NewRangeReader"); err != nil {
return nil, err
}
}
u := &url.URL{
Scheme: c.scheme,
Host: c.readHost,
@ -798,10 +791,9 @@ func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRange
if s.userProject != "" {
req.Header.Set("X-Goog-User-Project", s.userProject)
}
// TODO(noahdietz): add option for readCompressed.
// if o.readCompressed {
// req.Header.Set("Accept-Encoding", "gzip")
// }
if params.readCompressed {
req.Header.Set("Accept-Encoding", "gzip")
}
if err := setEncryptionHeaders(req.Header, params.encryptionKey, false); err != nil {
return nil, err
}
@ -1026,7 +1018,7 @@ func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storage
return
}
var resp *raw.Object
err := applyConds("NewWriter", params.attrs.Generation, params.conds, call)
err := applyConds("NewWriter", defaultGen, params.conds, call)
if err == nil {
if s.userProject != "" {
call.UserProject(s.userProject)

View file

@ -27,17 +27,17 @@ import (
// IAM provides access to IAM access control for the bucket.
func (b *BucketHandle) IAM() *iam.Handle {
return iam.InternalNewHandleClient(&iamClient{
raw: b.c.raw,
userProject: b.userProject,
retry: b.retry,
client: b.c,
}, b.name)
}
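// A minimal sketch of reading a bucket IAM policy through the handle returned
// by IAM, assuming an existing *storage.Client named client and a context
// ctx; the bucket name is a placeholder.
//
//	policy, err := client.Bucket("my-bucket").IAM().Policy(ctx)
//	if err != nil {
//		// TODO: Handle error.
//	}
//	for _, role := range policy.Roles() {
//		fmt.Println(role, policy.Members(role))
//	}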
// iamClient implements the iam.client interface.
type iamClient struct {
raw *raw.Service
userProject string
retry *retryConfig
client *Client
}
func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) {
@ -48,57 +48,25 @@ func (c *iamClient) GetWithVersion(ctx context.Context, resource string, request
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get")
defer func() { trace.EndSpan(ctx, err) }()
call := c.raw.Buckets.GetIamPolicy(resource).OptionsRequestedPolicyVersion(int64(requestedPolicyVersion))
setClientHeader(call.Header())
if c.userProject != "" {
call.UserProject(c.userProject)
}
var rp *raw.Policy
err = run(ctx, func() error {
rp, err = call.Context(ctx).Do()
return err
}, c.retry, true, setRetryHeaderHTTP(call))
if err != nil {
return nil, err
}
return iamFromStoragePolicy(rp), nil
o := makeStorageOpts(true, c.retry, c.userProject)
return c.client.tc.GetIamPolicy(ctx, resource, requestedPolicyVersion, o...)
}
func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set")
defer func() { trace.EndSpan(ctx, err) }()
rp := iamToStoragePolicy(p)
call := c.raw.Buckets.SetIamPolicy(resource, rp)
setClientHeader(call.Header())
if c.userProject != "" {
call.UserProject(c.userProject)
}
isIdempotent := len(p.Etag) > 0
return run(ctx, func() error {
_, err := call.Context(ctx).Do()
return err
}, c.retry, isIdempotent, setRetryHeaderHTTP(call))
o := makeStorageOpts(isIdempotent, c.retry, c.userProject)
return c.client.tc.SetIamPolicy(ctx, resource, p, o...)
}
func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test")
defer func() { trace.EndSpan(ctx, err) }()
call := c.raw.Buckets.TestIamPermissions(resource, perms)
setClientHeader(call.Header())
if c.userProject != "" {
call.UserProject(c.userProject)
}
var res *raw.TestIamPermissionsResponse
err = run(ctx, func() error {
res, err = call.Context(ctx).Do()
return err
}, c.retry, true, setRetryHeaderHTTP(call))
if err != nil {
return nil, err
}
return res.Permissions, nil
o := makeStorageOpts(true, c.retry, c.userProject)
return c.client.tc.TestIamPermissions(ctx, resource, perms, o...)
}
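// A minimal sketch of checking caller permissions, assuming an existing
// *storage.Client named client and a context ctx; the bucket name and the
// permission set are placeholders.
//
//	perms, err := client.Bucket("my-bucket").IAM().TestPermissions(ctx, []string{
//		"storage.objects.get",
//		"storage.objects.create",
//	})
//	if err != nil {
//		// TODO: Handle error.
//	}
//	fmt.Println("granted:", perms)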
func iamToStoragePolicy(ip *iampb.Policy) *raw.Policy {

View file

@ -19,43 +19,44 @@
//
// Lets you store and retrieve potentially-large, immutable data objects.
//
// NOTE: This package is in alpha. It is not stable, and is likely to change.
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// Example usage
// # Example usage
//
// To get started with this package, create a client.
// ctx := context.Background()
// c, err := storage.NewClient(ctx)
// if err != nil {
// // TODO: Handle error.
// }
// defer c.Close()
//
// ctx := context.Background()
// c, err := storage.NewClient(ctx)
// if err != nil {
// // TODO: Handle error.
// }
// defer c.Close()
//
// The client will use your default application credentials. Clients should be reused instead of created as needed.
// The methods of Client are safe for concurrent use by multiple goroutines.
// The returned client must be Closed when it is done being used.
//
// Using the Client
// # Using the Client
//
// The following is an example of making an API call with the newly created client.
//
// ctx := context.Background()
// c, err := storage.NewClient(ctx)
// if err != nil {
// // TODO: Handle error.
// }
// defer c.Close()
// ctx := context.Background()
// c, err := storage.NewClient(ctx)
// if err != nil {
// // TODO: Handle error.
// }
// defer c.Close()
//
// req := &storagepb.DeleteBucketRequest{
// // TODO: Fill request struct fields.
// // See https://pkg.go.dev/cloud.google.com/go/storage/internal/apiv2/stubs#DeleteBucketRequest.
// }
// err = c.DeleteBucket(ctx, req)
// if err != nil {
// // TODO: Handle error.
// }
// req := &storagepb.DeleteBucketRequest{
// // TODO: Fill request struct fields.
// // See https://pkg.go.dev/cloud.google.com/go/storage/internal/apiv2/stubs#DeleteBucketRequest.
// }
// err = c.DeleteBucket(ctx, req)
// if err != nil {
// // TODO: Handle error.
// }
//
// Use of Context
// # Use of Context
//
// The ctx passed to NewClient is used for authentication requests and
// for creating the underlying connection, but is not used for subsequent calls.

View file

@ -10,6 +10,11 @@
"grpc": {
"libraryClient": "Client",
"rpcs": {
"CancelResumableWrite": {
"methods": [
"CancelResumableWrite"
]
},
"ComposeObject": {
"methods": [
"ComposeObject"

View file

@ -0,0 +1,26 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"google.golang.org/grpc/metadata"
)
// InsertMetadata inserts the given gRPC metadata into the outgoing context.
func InsertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
return insertMetadata(ctx, mds...)
}
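// A minimal sketch of attaching request metadata, such as an
// x-goog-request-params routing header, to an outgoing context using the
// standard gRPC metadata package; the header value is a placeholder.
//
//	md := metadata.Pairs("x-goog-request-params", "bucket=projects%2F_%2Fbuckets%2Fmy-bucket")
//	ctx = InsertMetadata(ctx, md)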

View file

@ -18,7 +18,11 @@ package storage
import (
"context"
"fmt"
"math"
"net/url"
"regexp"
"strings"
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
gax "github.com/googleapis/gax-go/v2"
@ -51,6 +55,7 @@ type CallOptions struct {
ListNotifications []gax.CallOption
ComposeObject []gax.CallOption
DeleteObject []gax.CallOption
CancelResumableWrite []gax.CallOption
GetObject []gax.CallOption
ReadObject []gax.CallOption
UpdateObject []gax.CallOption
@ -96,6 +101,7 @@ func defaultCallOptions() *CallOptions {
ListNotifications: []gax.CallOption{},
ComposeObject: []gax.CallOption{},
DeleteObject: []gax.CallOption{},
CancelResumableWrite: []gax.CallOption{},
GetObject: []gax.CallOption{},
ReadObject: []gax.CallOption{},
UpdateObject: []gax.CallOption{},
@ -133,6 +139,7 @@ type internalClient interface {
ListNotifications(context.Context, *storagepb.ListNotificationsRequest, ...gax.CallOption) *NotificationIterator
ComposeObject(context.Context, *storagepb.ComposeObjectRequest, ...gax.CallOption) (*storagepb.Object, error)
DeleteObject(context.Context, *storagepb.DeleteObjectRequest, ...gax.CallOption) error
CancelResumableWrite(context.Context, *storagepb.CancelResumableWriteRequest, ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error)
GetObject(context.Context, *storagepb.GetObjectRequest, ...gax.CallOption) (*storagepb.Object, error)
ReadObject(context.Context, *storagepb.ReadObjectRequest, ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error)
UpdateObject(context.Context, *storagepb.UpdateObjectRequest, ...gax.CallOption) (*storagepb.Object, error)
@ -158,22 +165,22 @@ type internalClient interface {
//
// Resources are named as follows:
//
// Projects are referred to as they are defined by the Resource Manager API,
// using strings like projects/123456 or projects/my-string-id.
// Projects are referred to as they are defined by the Resource Manager API,
// using strings like projects/123456 or projects/my-string-id.
//
// Buckets are named using string names of the form:
// projects/{project}/buckets/{bucket}
// For globally unique buckets, _ may be substituted for the project.
// Buckets are named using string names of the form:
// projects/{project}/buckets/{bucket}
// For globally unique buckets, _ may be substituted for the project.
//
// Objects are uniquely identified by their name along with the name of the
// bucket they belong to, as separate strings in this API. For example:
// Objects are uniquely identified by their name along with the name of the
// bucket they belong to, as separate strings in this API. For example:
//
// ReadObjectRequest {
// bucket: projects/_/buckets/my-bucket
// object: my-object
// }
// Note that object names can contain / characters, which are treated as
// any other character (no special directory semantics).
// ReadObjectRequest {
// bucket: projects/_/buckets/my-bucket
// object: my-object
// }
// Note that object names can contain / characters, which are treated as
// any other character (no special directory semantics).
type Client struct {
// The internal transport-dependent client.
internalClient internalClient
@ -229,17 +236,17 @@ func (c *Client) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.L
return c.internalClient.LockBucketRetentionPolicy(ctx, req, opts...)
}
// GetIamPolicy gets the IAM policy for a specified bucket.
// GetIamPolicy gets the IAM policy for a specified bucket or object.
func (c *Client) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
return c.internalClient.GetIamPolicy(ctx, req, opts...)
}
// SetIamPolicy updates an IAM policy for the specified bucket.
// SetIamPolicy updates an IAM policy for the specified bucket or object.
func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
return c.internalClient.SetIamPolicy(ctx, req, opts...)
}
// TestIamPermissions tests a set of permissions on the given bucket to see which, if
// TestIamPermissions tests a set of permissions on the given bucket or object to see which, if
// any, are held by the caller.
func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
return c.internalClient.TestIamPermissions(ctx, req, opts...)
@ -280,12 +287,16 @@ func (c *Client) ComposeObject(ctx context.Context, req *storagepb.ComposeObject
}
// DeleteObject deletes an object and its metadata. Deletions are permanent if versioning
// is not enabled for the bucket, or if the generation parameter
// is used.
// is not enabled for the bucket, or if the generation parameter is used.
func (c *Client) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error {
return c.internalClient.DeleteObject(ctx, req, opts...)
}
// CancelResumableWrite cancels an in-progress resumable upload.
func (c *Client) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) {
return c.internalClient.CancelResumableWrite(ctx, req, opts...)
}
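// A minimal sketch of cancelling a resumable upload, assuming a generated
// *Client named c, a context ctx, and an upload ID previously returned by
// StartResumableWrite; the ID below is a placeholder.
//
//	req := &storagepb.CancelResumableWriteRequest{
//		UploadId: "projects/_/buckets/my-bucket/uploads/abc123",
//	}
//	if _, err := c.CancelResumableWrite(ctx, req); err != nil {
//		// TODO: Handle error.
//	}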
// GetObject retrieves an object's metadata.
func (c *Client) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
return c.internalClient.GetObject(ctx, req, opts...)
@ -319,33 +330,33 @@ func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRe
// error or an error response from the server), the client should do as
// follows:
//
// Check the result Status of the stream, to determine if writing can be
// resumed on this stream or must be restarted from scratch (by calling
// StartResumableWrite()). The resumable errors are DEADLINE_EXCEEDED,
// INTERNAL, and UNAVAILABLE. For each case, the client should use binary
// exponential backoff before retrying. Additionally, writes can be
// resumed after RESOURCE_EXHAUSTED errors, but only after taking
// appropriate measures, which may include reducing aggregate send rate
// across clients and/or requesting a quota increase for your project.
// Check the result Status of the stream, to determine if writing can be
// resumed on this stream or must be restarted from scratch (by calling
// StartResumableWrite()). The resumable errors are DEADLINE_EXCEEDED,
// INTERNAL, and UNAVAILABLE. For each case, the client should use binary
// exponential backoff before retrying. Additionally, writes can be
// resumed after RESOURCE_EXHAUSTED errors, but only after taking
// appropriate measures, which may include reducing aggregate send rate
// across clients and/or requesting a quota increase for your project.
//
// If the call to WriteObject returns ABORTED, that indicates
// concurrent attempts to update the resumable write, caused either by
// multiple racing clients or by a single client where the previous
// request was timed out on the client side but nonetheless reached the
// server. In this case the client should take steps to prevent further
// concurrent writes (e.g., increase the timeouts, stop using more than
// one process to perform the upload, etc.), and then should follow the
// steps below for resuming the upload.
// If the call to WriteObject returns ABORTED, that indicates
// concurrent attempts to update the resumable write, caused either by
// multiple racing clients or by a single client where the previous
// request was timed out on the client side but nonetheless reached the
// server. In this case the client should take steps to prevent further
// concurrent writes (e.g., increase the timeouts, stop using more than
// one process to perform the upload, etc.), and then should follow the
// steps below for resuming the upload.
//
// For resumable errors, the client should call QueryWriteStatus() and
// then continue writing from the returned persisted_size. This may be
// less than the amount of data the client previously sent. Note also that
// it is acceptable to send data starting at an offset earlier than the
// returned persisted_size; in this case, the service will skip data at
// offsets that were already persisted (without checking that it matches
// the previously written data), and write only the data starting from the
// persisted offset. This behavior can make client-side handling simpler
// in some cases.
// For resumable errors, the client should call QueryWriteStatus() and
// then continue writing from the returned persisted_size. This may be
// less than the amount of data the client previously sent. Note also that
// it is acceptable to send data starting at an offset earlier than the
// returned persisted_size; in this case, the service will skip data at
// offsets that were already persisted (without checking that it matches
// the previously written data), and write only the data starting from the
// persisted offset. This behavior can make client-side handling simpler
// in some cases.
//
// The service will not view the object as complete until the client has
// sent a WriteObjectRequest with finish_write set to true. Sending any
@ -455,22 +466,22 @@ type gRPCClient struct {
//
// Resources are named as follows:
//
// Projects are referred to as they are defined by the Resource Manager API,
// using strings like projects/123456 or projects/my-string-id.
// Projects are referred to as they are defined by the Resource Manager API,
// using strings like projects/123456 or projects/my-string-id.
//
// Buckets are named using string names of the form:
// projects/{project}/buckets/{bucket}
// For globally unique buckets, _ may be substituted for the project.
// Buckets are named using string names of the form:
// projects/{project}/buckets/{bucket}
// For globally unique buckets, _ may be substituted for the project.
//
// Objects are uniquely identified by their name along with the name of the
// bucket they belong to, as separate strings in this API. For example:
// Objects are uniquely identified by their name along with the name of the
// bucket they belong to, as separate strings in this API. For example:
//
// ReadObjectRequest {
// bucket: projects/_/buckets/my-bucket
// object: my-object
// }
// Note that object names can contain / characters, which are treated as
// any other character (no special directory semantics).
// ReadObjectRequest {
// bucket: projects/_/buckets/my-bucket
// object: my-object
// }
// Note that object names can contain / characters, which are treated as
// any other character (no special directory semantics).
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
clientOpts := defaultGRPCClientOptions()
if newClientHook != nil {
@ -528,7 +539,18 @@ func (c *gRPCClient) Close() error {
}
func (c *gRPCClient) DeleteBucket(ctx context.Context, req *storagepb.DeleteBucketRequest, opts ...gax.CallOption) error {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).DeleteBucket[0:len((*c.CallOptions).DeleteBucket):len((*c.CallOptions).DeleteBucket)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
@ -539,7 +561,18 @@ func (c *gRPCClient) DeleteBucket(ctx context.Context, req *storagepb.DeleteBuck
}
func (c *gRPCClient) GetBucket(ctx context.Context, req *storagepb.GetBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).GetBucket[0:len((*c.CallOptions).GetBucket):len((*c.CallOptions).GetBucket)], opts...)
var resp *storagepb.Bucket
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -612,7 +645,18 @@ func (c *gRPCClient) ListBuckets(ctx context.Context, req *storagepb.ListBuckets
}
func (c *gRPCClient) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.LockBucketRetentionPolicyRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).LockBucketRetentionPolicy[0:len((*c.CallOptions).LockBucketRetentionPolicy):len((*c.CallOptions).LockBucketRetentionPolicy)], opts...)
var resp *storagepb.Bucket
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -627,7 +671,21 @@ func (c *gRPCClient) LockBucketRetentionPolicy(ctx context.Context, req *storage
}
func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
}
if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).GetIamPolicy[0:len((*c.CallOptions).GetIamPolicy):len((*c.CallOptions).GetIamPolicy)], opts...)
var resp *iampb.Policy
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -642,7 +700,21 @@ func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRe
}
func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
}
if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).SetIamPolicy[0:len((*c.CallOptions).SetIamPolicy):len((*c.CallOptions).SetIamPolicy)], opts...)
var resp *iampb.Policy
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -657,7 +729,21 @@ func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRe
}
func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
}
if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).TestIamPermissions[0:len((*c.CallOptions).TestIamPermissions):len((*c.CallOptions).TestIamPermissions)], opts...)
var resp *iampb.TestIamPermissionsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -672,7 +758,18 @@ func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamP
}
func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBucketRequest, opts ...gax.CallOption) (*storagepb.Bucket, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket().GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetName())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket().GetName())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).UpdateBucket[0:len((*c.CallOptions).UpdateBucket):len((*c.CallOptions).UpdateBucket)], opts...)
var resp *storagepb.Bucket
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -687,7 +784,18 @@ func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBuck
}
func (c *gRPCClient) DeleteNotification(ctx context.Context, req *storagepb.DeleteNotificationRequest, opts ...gax.CallOption) error {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).DeleteNotification[0:len((*c.CallOptions).DeleteNotification):len((*c.CallOptions).DeleteNotification)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
@ -698,7 +806,18 @@ func (c *gRPCClient) DeleteNotification(ctx context.Context, req *storagepb.Dele
}
func (c *gRPCClient) GetNotification(ctx context.Context, req *storagepb.GetNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).GetNotification[0:len((*c.CallOptions).GetNotification):len((*c.CallOptions).GetNotification)], opts...)
var resp *storagepb.Notification
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -713,7 +832,18 @@ func (c *gRPCClient) GetNotification(ctx context.Context, req *storagepb.GetNoti
}
func (c *gRPCClient) CreateNotification(ctx context.Context, req *storagepb.CreateNotificationRequest, opts ...gax.CallOption) (*storagepb.Notification, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).CreateNotification[0:len((*c.CallOptions).CreateNotification):len((*c.CallOptions).CreateNotification)], opts...)
var resp *storagepb.Notification
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -728,7 +858,18 @@ func (c *gRPCClient) CreateNotification(ctx context.Context, req *storagepb.Crea
}
func (c *gRPCClient) ListNotifications(ctx context.Context, req *storagepb.ListNotificationsRequest, opts ...gax.CallOption) *NotificationIterator {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).ListNotifications[0:len((*c.CallOptions).ListNotifications):len((*c.CallOptions).ListNotifications)], opts...)
it := &NotificationIterator{}
req = proto.Clone(req).(*storagepb.ListNotificationsRequest)
@ -771,7 +912,18 @@ func (c *gRPCClient) ListNotifications(ctx context.Context, req *storagepb.ListN
}
func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetDestination().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetDestination().GetBucket())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetDestination().GetBucket())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).ComposeObject[0:len((*c.CallOptions).ComposeObject):len((*c.CallOptions).ComposeObject)], opts...)
var resp *storagepb.Object
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -786,7 +938,18 @@ func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeOb
}
func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).DeleteObject[0:len((*c.CallOptions).DeleteObject):len((*c.CallOptions).DeleteObject)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
@ -796,8 +959,45 @@ func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObje
return err
}
func (c *gRPCClient) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) {
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetUploadId()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).CancelResumableWrite[0:len((*c.CallOptions).CancelResumableWrite):len((*c.CallOptions).CancelResumableWrite)], opts...)
var resp *storagepb.CancelResumableWriteResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.client.CancelResumableWrite(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}
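// A standalone sketch of the routing-header construction used above, with a
// placeholder upload ID, showing how the bucket is extracted and URL-escaped
// into the x-goog-request-params value.
//
//	uploadID := "projects/_/buckets/my-bucket/uploads/abc123"
//	re := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?")
//	bucket := url.QueryEscape(re.FindStringSubmatch(uploadID)[1])
//	hdr := "bucket=" + bucket
//	// hdr is "bucket=projects%2F_%2Fbuckets%2Fmy-bucket"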
func (c *gRPCClient) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).GetObject[0:len((*c.CallOptions).GetObject):len((*c.CallOptions).GetObject)], opts...)
var resp *storagepb.Object
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -812,7 +1012,18 @@ func (c *gRPCClient) GetObject(ctx context.Context, req *storagepb.GetObjectRequ
}
func (c *gRPCClient) ReadObject(ctx context.Context, req *storagepb.ReadObjectRequest, opts ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
var resp storagepb.Storage_ReadObjectClient
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
@ -826,7 +1037,18 @@ func (c *gRPCClient) ReadObject(ctx context.Context, req *storagepb.ReadObjectRe
}
func (c *gRPCClient) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetObject().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetObject().GetBucket())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetObject().GetBucket())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).UpdateObject[0:len((*c.CallOptions).UpdateObject):len((*c.CallOptions).UpdateObject)], opts...)
var resp *storagepb.Object
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -856,7 +1078,18 @@ func (c *gRPCClient) WriteObject(ctx context.Context, opts ...gax.CallOption) (s
}
func (c *gRPCClient) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).ListObjects[0:len((*c.CallOptions).ListObjects):len((*c.CallOptions).ListObjects)], opts...)
it := &ObjectIterator{}
req = proto.Clone(req).(*storagepb.ListObjectsRequest)
@ -899,7 +1132,21 @@ func (c *gRPCClient) ListObjects(ctx context.Context, req *storagepb.ListObjects
}
func (c *gRPCClient) RewriteObject(ctx context.Context, req *storagepb.RewriteObjectRequest, opts ...gax.CallOption) (*storagepb.RewriteResponse, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetSourceBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetSourceBucket())[1])) > 0 {
routingHeadersMap["source_bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetSourceBucket())[1])
}
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetDestinationBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetDestinationBucket())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetDestinationBucket())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).RewriteObject[0:len((*c.CallOptions).RewriteObject):len((*c.CallOptions).RewriteObject)], opts...)
var resp *storagepb.RewriteResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -914,7 +1161,18 @@ func (c *gRPCClient) RewriteObject(ctx context.Context, req *storagepb.RewriteOb
}
func (c *gRPCClient) StartResumableWrite(ctx context.Context, req *storagepb.StartResumableWriteRequest, opts ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(?P<bucket>.*)"); reg.MatchString(req.GetWriteObjectSpec().GetResource().GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetWriteObjectSpec().GetResource().GetBucket())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetWriteObjectSpec().GetResource().GetBucket())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).StartResumableWrite[0:len((*c.CallOptions).StartResumableWrite):len((*c.CallOptions).StartResumableWrite)], opts...)
var resp *storagepb.StartResumableWriteResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -929,7 +1187,18 @@ func (c *gRPCClient) StartResumableWrite(ctx context.Context, req *storagepb.Sta
}
func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWriteStatusRequest, opts ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
routingHeaders := ""
routingHeadersMap := make(map[string]string)
if reg := regexp.MustCompile("(?P<bucket>projects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetUploadId()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetUploadId())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
routingHeaders = strings.TrimSuffix(routingHeaders, "&")
md := metadata.Pairs("x-goog-request-params", routingHeaders)
ctx = insertMetadata(ctx, c.xGoogMetadata, md)
opts = append((*c.CallOptions).QueryWriteStatus[0:len((*c.CallOptions).QueryWriteStatus):len((*c.CallOptions).QueryWriteStatus)], opts...)
var resp *storagepb.QueryWriteStatusResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

File diff suppressed because it is too large

View file

@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
const Version = "1.24.0"
const Version = "1.25.0"

View file

@ -157,21 +157,10 @@ func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (re
if n.TopicID == "" {
return nil, errors.New("storage: AddNotification: missing TopicID")
}
call := b.c.raw.Notifications.Insert(b.name, toRawNotification(n))
setClientHeader(call.Header())
if b.userProject != "" {
call.UserProject(b.userProject)
}
var rn *raw.Notification
err = run(ctx, func() error {
rn, err = call.Context(ctx).Do()
return err
}, b.retry, false, setRetryHeaderHTTP(call))
if err != nil {
return nil, err
}
return toNotification(rn), nil
opts := makeStorageOpts(false, b.retry, b.userProject)
ret, err = b.c.tc.CreateNotification(ctx, b.name, n, opts...)
return ret, err
}
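// A minimal usage sketch for AddNotification, assuming an existing
// *storage.Client named client and a context ctx; the bucket, project and
// topic names are placeholders.
//
//	n, err := client.Bucket("my-bucket").AddNotification(ctx, &storage.Notification{
//		TopicProjectID: "my-project",
//		TopicID:        "my-topic",
//		PayloadFormat:  storage.JSONPayload,
//	})
//	if err != nil {
//		// TODO: Handle error.
//	}
//	fmt.Println("created notification", n.ID)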
// Notifications returns all the Notifications configured for this bucket, as a map
@ -180,20 +169,9 @@ func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notific
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications")
defer func() { trace.EndSpan(ctx, err) }()
call := b.c.raw.Notifications.List(b.name)
setClientHeader(call.Header())
if b.userProject != "" {
call.UserProject(b.userProject)
}
var res *raw.Notifications
err = run(ctx, func() error {
res, err = call.Context(ctx).Do()
return err
}, b.retry, true, setRetryHeaderHTTP(call))
if err != nil {
return nil, err
}
return notificationsToMap(res.Items), nil
opts := makeStorageOpts(true, b.retry, b.userProject)
n, err = b.c.tc.ListNotifications(ctx, b.name, opts...)
return n, err
}
func notificationsToMap(rns []*raw.Notification) map[string]*Notification {
@ -217,12 +195,6 @@ func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err e
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification")
defer func() { trace.EndSpan(ctx, err) }()
call := b.c.raw.Notifications.Delete(b.name, id)
setClientHeader(call.Header())
if b.userProject != "" {
call.UserProject(b.userProject)
}
return run(ctx, func() error {
return call.Context(ctx).Do()
}, b.retry, true, setRetryHeaderHTTP(call))
opts := makeStorageOpts(true, b.retry, b.userProject)
return b.c.tc.DeleteNotification(ctx, b.name, id, opts...)
}
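// A minimal sketch of listing and deleting notifications, assuming an
// existing *storage.Client named client and a context ctx; the bucket name is
// a placeholder.
//
//	bkt := client.Bucket("my-bucket")
//	ns, err := bkt.Notifications(ctx)
//	if err != nil {
//		// TODO: Handle error.
//	}
//	for id := range ns {
//		if err := bkt.DeleteNotification(ctx, id); err != nil {
//			// TODO: Handle error.
//		}
//	}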

View file

@ -16,19 +16,15 @@ package storage
import (
"context"
"errors"
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"cloud.google.com/go/internal/trace"
"google.golang.org/api/googleapi"
)
var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
@ -94,10 +90,6 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader")
defer func() { trace.EndSpan(ctx, err) }()
if o.c.tc != nil {
return o.newRangeReaderWithGRPC(ctx, offset, length)
}
if err := o.validate(); err != nil {
return nil, err
}
@ -109,208 +101,31 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
return nil, err
}
}
u := &url.URL{
Scheme: o.c.scheme,
Host: o.c.readHost,
Path: fmt.Sprintf("/%s/%s", o.bucket, o.object),
}
verb := "GET"
if length == 0 {
verb = "HEAD"
}
req, err := http.NewRequest(verb, u.String(), nil)
if err != nil {
return nil, err
}
req = req.WithContext(ctx)
if o.userProject != "" {
req.Header.Set("X-Goog-User-Project", o.userProject)
}
if o.readCompressed {
req.Header.Set("Accept-Encoding", "gzip")
}
if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil {
return nil, err
opts := makeStorageOpts(true, o.retry, o.userProject)
params := &newRangeReaderParams{
bucket: o.bucket,
object: o.object,
gen: o.gen,
offset: offset,
length: length,
encryptionKey: o.encryptionKey,
conds: o.conds,
readCompressed: o.readCompressed,
}
gen := o.gen
r, err = o.c.tc.NewRangeReader(ctx, params, opts...)
// Define a function that initiates a Read with offset and length, assuming we
// have already read seen bytes.
reopen := func(seen int64) (*http.Response, error) {
// If the context has already expired, return immediately without making a
// call.
if err := ctx.Err(); err != nil {
return nil, err
}
start := offset + seen
if length < 0 && start < 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d", start))
} else if length < 0 && start > 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start))
} else if length > 0 {
// The end character isn't affected by how many bytes we've seen.
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, offset+length-1))
}
// We wait to assign conditions here because the generation number can change in between reopen() runs.
if err := setConditionsHeaders(req.Header, o.conds); err != nil {
return nil, err
}
// If an object generation is specified, include generation as query string parameters.
if gen >= 0 {
req.URL.RawQuery = fmt.Sprintf("generation=%d", gen)
}
var res *http.Response
err = run(ctx, func() error {
res, err = o.c.hc.Do(req)
if err != nil {
return err
}
if res.StatusCode == http.StatusNotFound {
res.Body.Close()
return ErrObjectNotExist
}
if res.StatusCode < 200 || res.StatusCode > 299 {
body, _ := ioutil.ReadAll(res.Body)
res.Body.Close()
return &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
Body: string(body),
}
}
partialContentNotSatisfied :=
!decompressiveTranscoding(res) &&
start > 0 && length != 0 &&
res.StatusCode != http.StatusPartialContent
if partialContentNotSatisfied {
res.Body.Close()
return errors.New("storage: partial request not satisfied")
}
// With "Content-Encoding": "gzip" aka decompressive transcoding, GCS serves
// back the whole file regardless of the range count passed in as per:
// https://cloud.google.com/storage/docs/transcoding#range,
// thus we have to manually move the body forward by seen bytes.
if decompressiveTranscoding(res) && seen > 0 {
_, _ = io.CopyN(ioutil.Discard, res.Body, seen)
}
// If a generation hasn't been specified, and this is the first response we get, let's record the
// generation. In future requests we'll use this generation as a precondition to avoid data races.
if gen < 0 && res.Header.Get("X-Goog-Generation") != "" {
gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64)
if err != nil {
return err
}
gen = gen64
}
return nil
}, o.retry, true, setRetryHeaderHTTP(&readerRequestWrapper{req}))
if err != nil {
return nil, err
}
return res, nil
}
res, err := reopen(0)
if err != nil {
return nil, err
}
var (
size int64 // total size of object, even if a range was requested.
checkCRC bool
crc uint32
startOffset int64 // non-zero if range request.
)
if res.StatusCode == http.StatusPartialContent {
cr := strings.TrimSpace(res.Header.Get("Content-Range"))
if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
}
// Content range is formatted <first byte>-<last byte>/<total size>. We take
// the total size.
size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
if err != nil {
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
}
dashIndex := strings.Index(cr, "-")
if dashIndex >= 0 {
startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64)
if err != nil {
return nil, fmt.Errorf("storage: invalid Content-Range %q: %v", cr, err)
}
}
} else {
size = res.ContentLength
// Check the CRC iff all of the following hold:
// - We asked for content (length != 0).
// - We got all the content (status != PartialContent).
// - The server sent a CRC header.
// - The Go http stack did not uncompress the file.
// - We were not served compressed data that was uncompressed on download.
// The problem with the last two cases is that the CRC will not match -- GCS
// computes it on the compressed contents, but we compute it on the
// uncompressed contents.
if length != 0 && !res.Uncompressed && !uncompressedByServer(res) {
crc, checkCRC = parseCRC32c(res)
}
}
remain := res.ContentLength
body := res.Body
if length == 0 {
remain = 0
body.Close()
body = emptyBody
}
var metaGen int64
if res.Header.Get("X-Goog-Metageneration") != "" {
metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64)
if err != nil {
return nil, err
}
}
var lm time.Time
if res.Header.Get("Last-Modified") != "" {
lm, err = http.ParseTime(res.Header.Get("Last-Modified"))
if err != nil {
return nil, err
}
}
attrs := ReaderObjectAttrs{
Size: size,
ContentType: res.Header.Get("Content-Type"),
ContentEncoding: res.Header.Get("Content-Encoding"),
CacheControl: res.Header.Get("Cache-Control"),
LastModified: lm,
StartOffset: startOffset,
Generation: gen,
Metageneration: metaGen,
}
return &Reader{
Attrs: attrs,
body: body,
size: size,
remain: remain,
wantCRC: crc,
checkCRC: checkCRC,
reopen: reopen,
}, nil
return r, err
}
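// A minimal usage sketch for NewRangeReader, assuming an existing
// *storage.Client named client and a context ctx; the bucket and object names
// are placeholders. A negative length reads to the end of the object.
//
//	r, err := client.Bucket("my-bucket").Object("my-object").NewRangeReader(ctx, 16, 32)
//	if err != nil {
//		// TODO: Handle error.
//	}
//	defer r.Close()
//	data, err := io.ReadAll(r)
//	if err != nil {
//		// TODO: Handle error.
//	}
//	fmt.Printf("read %d bytes\n", len(data))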
// decompressiveTranscoding returns true if the request was served decompressed
// and different than its original storage form. This happens when the "Content-Encoding"
// header is "gzip".
// See:
// * https://cloud.google.com/storage/docs/transcoding#transcoding_and_gzip
// * https://github.com/googleapis/google-cloud-go/issues/1800
// - https://cloud.google.com/storage/docs/transcoding#transcoding_and_gzip
// - https://github.com/googleapis/google-cloud-go/issues/1800
func decompressiveTranscoding(res *http.Response) bool {
// Decompressive Transcoding.
return res.Header.Get("Content-Encoding") == "gzip" ||
@ -375,37 +190,21 @@ var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding.
type Reader struct {
Attrs ReaderObjectAttrs
body io.ReadCloser
seen, remain, size int64
checkCRC bool // should we check the CRC?
wantCRC uint32 // the CRC32c value the server sent in the header
gotCRC uint32 // running crc
reopen func(seen int64) (*http.Response, error)
reader io.ReadCloser
}
// Close closes the Reader. It must be called when done reading.
func (r *Reader) Close() error {
if r.body != nil {
return r.body.Close()
}
// TODO(noahdietz): Complete integration means returning this call's return
// value, which for gRPC will always be nil.
if r.reader != nil {
return r.reader.Close()
}
return nil
return r.reader.Close()
}
func (r *Reader) Read(p []byte) (int, error) {
read := r.readWithRetry
if r.reader != nil {
read = r.reader.Read
}
n, err := read(p)
n, err := r.reader.Read(p)
if r.remain != -1 {
r.remain -= int64(n)
}
@ -424,56 +223,6 @@ func (r *Reader) Read(p []byte) (int, error) {
return n, err
}
// newRangeReaderWithGRPC creates a new Reader with the given range that uses
// gRPC to read Object content.
//
// This is an experimental API and not intended for public use.
func (o *ObjectHandle) newRangeReaderWithGRPC(ctx context.Context, offset, length int64) (r *Reader, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.newRangeReaderWithGRPC")
defer func() { trace.EndSpan(ctx, err) }()
if err = o.validate(); err != nil {
return
}
params := &newRangeReaderParams{
bucket: o.bucket,
object: o.object,
gen: o.gen,
offset: offset,
length: length,
encryptionKey: o.encryptionKey,
conds: o.conds,
}
r, err = o.c.tc.NewRangeReader(ctx, params)
return r, err
}
func (r *Reader) readWithRetry(p []byte) (int, error) {
n := 0
for len(p[n:]) > 0 {
m, err := r.body.Read(p[n:])
n += m
r.seen += int64(m)
if err == nil || err == io.EOF {
return n, err
}
// Read failed (likely due to connection issues), but we will try to reopen
// the pipe and continue. Send a ranged read request that takes into account
// the number of bytes we've already seen.
res, err := r.reopen(r.seen)
if err != nil {
// reopen already retries
return n, err
}
r.body.Close()
r.body = res.Body
}
return n, nil
}
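The reopen path above re-issues a ranged request starting at the number of bytes already consumed, so callers only ever see a plain io.ReadCloser. A minimal usage sketch, assuming an existing *storage.Client named client, a ctx, and placeholder bucket/object names:

	r, err := client.Bucket("my-bucket").Object("my-object").NewRangeReader(ctx, 1024, 4096)
	if err != nil {
		// handle the error from opening the range
	}
	defer r.Close()
	// Transient read failures are retried internally by reopening the range
	// at the offset already seen.
	data, err := ioutil.ReadAll(r)
	_ = data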
// Size returns the size of the object in bytes.
// The returned value is always the same and is not affected by
// calls to Read or Close.

View file

@ -115,6 +115,10 @@ type Client struct {
// tc is the transport-agnostic client implemented with either gRPC or HTTP.
tc storageClient
// useGRPC flags whether the client uses gRPC. This is needed while the
// integration piece is only partially complete.
// TODO: remove before merging to main.
useGRPC bool
}
// NewClient creates a new Google Cloud Storage client.
@ -195,12 +199,18 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
return nil, fmt.Errorf("supplied endpoint %q is not valid: %v", ep, err)
}
tc, err := newHTTPStorageClient(ctx, withClientOptions(opts...))
if err != nil {
return nil, fmt.Errorf("storage: %v", err)
}
return &Client{
hc: hc,
raw: rawService,
scheme: u.Scheme,
readHost: u.Host,
creds: creds,
tc: tc,
}, nil
}
@ -215,7 +225,7 @@ func newGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, e
return nil, err
}
return &Client{tc: tc, useGRPC: true}, nil
}
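For comparison, a hedged sketch of constructing the default HTTP/JSON-backed client, which fills tc with the HTTP storageClient created in NewClient above; the gRPC constructor stays internal while the transport-agnostic integration is completed:

	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatalf("storage.NewClient: %v", err)
	}
	defer client.Close()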
// Close closes the Client.
@ -509,13 +519,13 @@ func v2SanitizeHeaders(hdrs []string) []string {
// at https://cloud.google.com/storage/docs/authentication/canonical-requests#about-headers.
//
// V4 does a couple things differently from V2:
// - Headers get sorted by key, instead of by key:value. We do this in
// signedURLV4.
// - There's no canonical regexp: we simply split headers on :.
// - We don't exclude canonical headers.
// - We replace leading and trailing spaces in header values, like v2, but also
// all intermediate space duplicates get stripped. That is, there's only ever
// a single consecutive space.
func v4SanitizeHeaders(hdrs []string) []string {
headerMap := map[string][]string{}
for _, hdr := range hdrs {
@ -907,27 +917,8 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error
if err := o.validate(); err != nil {
return nil, err
}
call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx)
if err := applyConds("Attrs", o.gen, o.conds, call); err != nil {
return nil, err
}
if o.userProject != "" {
call.UserProject(o.userProject)
}
if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
return nil, err
}
var obj *raw.Object
setClientHeader(call.Header())
err = run(ctx, func() error { obj, err = call.Do(); return err }, o.retry, true, setRetryHeaderHTTP(call))
var e *googleapi.Error
if errors.As(err, &e) && e.Code == http.StatusNotFound {
return nil, ErrObjectNotExist
}
if err != nil {
return nil, err
}
return newObject(obj), nil
opts := makeStorageOpts(true, o.retry, o.userProject)
return o.c.tc.GetObject(ctx, o.bucket, o.object, o.gen, o.encryptionKey, o.conds, opts...)
}
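A usage sketch of the delegating Attrs call, assuming the client from the sketch above and placeholder object names; a missing object still surfaces as ErrObjectNotExist:

	attrs, err := client.Bucket("my-bucket").Object("my-object").Attrs(ctx)
	if err == storage.ErrObjectNotExist {
		// the object does not exist
	} else if err == nil {
		fmt.Println(attrs.Size, attrs.ContentType)
	}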
// Update updates an object with the provided attributes. See
@ -940,99 +931,9 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
if err := o.validate(); err != nil {
return nil, err
}
var attrs ObjectAttrs
// Lists of fields to send, and set to null, in the JSON.
var forceSendFields, nullFields []string
if uattrs.ContentType != nil {
attrs.ContentType = optional.ToString(uattrs.ContentType)
// For ContentType, sending the empty string is a no-op.
// Instead we send a null.
if attrs.ContentType == "" {
nullFields = append(nullFields, "ContentType")
} else {
forceSendFields = append(forceSendFields, "ContentType")
}
}
if uattrs.ContentLanguage != nil {
attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage)
// For ContentLanguage it's an error to send the empty string.
// Instead we send a null.
if attrs.ContentLanguage == "" {
nullFields = append(nullFields, "ContentLanguage")
} else {
forceSendFields = append(forceSendFields, "ContentLanguage")
}
}
if uattrs.ContentEncoding != nil {
attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding)
forceSendFields = append(forceSendFields, "ContentEncoding")
}
if uattrs.ContentDisposition != nil {
attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition)
forceSendFields = append(forceSendFields, "ContentDisposition")
}
if uattrs.CacheControl != nil {
attrs.CacheControl = optional.ToString(uattrs.CacheControl)
forceSendFields = append(forceSendFields, "CacheControl")
}
if uattrs.EventBasedHold != nil {
attrs.EventBasedHold = optional.ToBool(uattrs.EventBasedHold)
forceSendFields = append(forceSendFields, "EventBasedHold")
}
if uattrs.TemporaryHold != nil {
attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold)
forceSendFields = append(forceSendFields, "TemporaryHold")
}
if !uattrs.CustomTime.IsZero() {
attrs.CustomTime = uattrs.CustomTime
forceSendFields = append(forceSendFields, "CustomTime")
}
if uattrs.Metadata != nil {
attrs.Metadata = uattrs.Metadata
if len(attrs.Metadata) == 0 {
// Sending the empty map is a no-op. We send null instead.
nullFields = append(nullFields, "Metadata")
} else {
forceSendFields = append(forceSendFields, "Metadata")
}
}
if uattrs.ACL != nil {
attrs.ACL = uattrs.ACL
// It's an error to attempt to delete the ACL, so
// we don't append to nullFields here.
forceSendFields = append(forceSendFields, "Acl")
}
rawObj := attrs.toRawObject(o.bucket)
rawObj.ForceSendFields = forceSendFields
rawObj.NullFields = nullFields
call := o.c.raw.Objects.Patch(o.bucket, o.object, rawObj).Projection("full").Context(ctx)
if err := applyConds("Update", o.gen, o.conds, call); err != nil {
return nil, err
}
if o.userProject != "" {
call.UserProject(o.userProject)
}
if uattrs.PredefinedACL != "" {
call.PredefinedAcl(uattrs.PredefinedACL)
}
if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
return nil, err
}
var obj *raw.Object
setClientHeader(call.Header())
var isIdempotent bool
if o.conds != nil && o.conds.MetagenerationMatch != 0 {
isIdempotent = true
}
err = run(ctx, func() error { obj, err = call.Do(); return err }, o.retry, isIdempotent, setRetryHeaderHTTP(call))
var e *googleapi.Error
if errors.As(err, &e) && e.Code == http.StatusNotFound {
return nil, ErrObjectNotExist
}
if err != nil {
return nil, err
}
return newObject(obj), nil
isIdempotent := o.conds != nil && o.conds.MetagenerationMatch != 0
opts := makeStorageOpts(isIdempotent, o.retry, o.userProject)
return o.c.tc.UpdateObject(ctx, o.bucket, o.object, &uattrs, o.gen, o.encryptionKey, o.conds, opts...)
}
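A short sketch of the same semantics from the caller's side, assuming the client and placeholder names used above; fields left unset keep their stored values, while an empty non-nil Metadata map clears the metadata:

	uattrs := storage.ObjectAttrsToUpdate{
		ContentType: "text/html",
		Metadata:    map[string]string{}, // empty non-nil map is sent as a JSON null, clearing metadata
	}
	if _, err := client.Bucket("my-bucket").Object("my-object").Update(ctx, uattrs); err != nil {
		// handle error
	}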
// BucketName returns the name of the bucket.
@ -1052,11 +953,12 @@ func (o *ObjectHandle) ObjectName() string {
//
// For example, to change ContentType and delete ContentEncoding and
// Metadata, use
//
// ObjectAttrsToUpdate{
// ContentType: "text/html",
// ContentEncoding: "",
// Metadata: map[string]string{},
// }
type ObjectAttrsToUpdate struct {
EventBasedHold optional.Bool
TemporaryHold optional.Bool
@ -1079,27 +981,11 @@ func (o *ObjectHandle) Delete(ctx context.Context) error {
if err := o.validate(); err != nil {
return err
}
call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx)
if err := applyConds("Delete", o.gen, o.conds, call); err != nil {
return err
}
if o.userProject != "" {
call.UserProject(o.userProject)
}
// Encryption doesn't apply to Delete.
setClientHeader(call.Header())
var isIdempotent bool
// Delete is idempotent if GenerationMatch or Generation have been passed in.
// The default generation is negative to get the latest version of the object.
if (o.conds != nil && o.conds.GenerationMatch != 0) || o.gen >= 0 {
isIdempotent = true
}
err := run(ctx, func() error { return call.Do() }, o.retry, isIdempotent, setRetryHeaderHTTP(call))
var e *googleapi.Error
if errors.As(err, &e) && e.Code == http.StatusNotFound {
return ErrObjectNotExist
}
return err
isIdempotent := (o.conds != nil && o.conds.GenerationMatch != 0) || o.gen >= 0
opts := makeStorageOpts(isIdempotent, o.retry, o.userProject)
return o.c.tc.DeleteObject(ctx, o.bucket, o.object, o.gen, o.conds, opts...)
}
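A sketch of an idempotent, conditional delete as described above; gen is a placeholder for a known object generation:

	obj := client.Bucket("my-bucket").Object("my-object")
	if err := obj.If(storage.Conditions{GenerationMatch: gen}).Delete(ctx); err != nil {
		// a missing object surfaces as storage.ErrObjectNotExist
	}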
// ReadCompressed when true causes the read to happen without decompressing.
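For instance, a gzip-stored object can be fetched in its stored form; a sketch using the same placeholder names:

	r, err := client.Bucket("my-bucket").Object("my-object").ReadCompressed(true).NewReader(ctx)
	if err == nil {
		defer r.Close()
		// r now yields the raw stored bytes rather than decompressed content
	}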
@ -2096,17 +1982,9 @@ func toProtoCommonObjectRequestParams(key []byte) *storagepb.CommonObjectRequest
// ServiceAccount fetches the email address of the given project's Google Cloud Storage service account.
func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) {
r := c.raw.Projects.ServiceAccount.Get(projectID)
var res *raw.ServiceAccount
var err error
err = run(ctx, func() error {
res, err = r.Context(ctx).Do()
return err
}, c.retry, true, setRetryHeaderHTTP(r))
if err != nil {
return "", err
}
return res.EmailAddress, nil
o := makeStorageOpts(true, c.retry, "")
return c.tc.GetServiceAccount(ctx, projectID, o...)
}
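A one-line usage sketch, assuming the client from the earlier sketch and a placeholder project ID:

	email, err := client.ServiceAccount(ctx, "my-project-id")
	if err == nil {
		fmt.Println("GCS service account:", email)
	}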
// bucketResourceName formats the given project ID and bucketResourceName ID

View file

@ -16,25 +16,12 @@ package storage
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"sync"
"time"
"unicode/utf8"
storagepb "cloud.google.com/go/storage/internal/apiv2/stubs"
"google.golang.org/api/googleapi"
raw "google.golang.org/api/storage/v1"
)
const (
// Maximum amount of content that can be sent per WriteObjectRequest message.
// A buffer reaching this amount will precipitate a flush of the buffer.
//
// This is only used for the gRPC-based Writer.
maxPerMessageWriteSize int = int(storagepb.ServiceConstants_MAX_WRITE_CHUNK_BYTES)
)
// A Writer writes a Cloud Storage object.
@ -122,102 +109,6 @@ type Writer struct {
err error
}
func (w *Writer) open() error {
if err := w.validateWriteAttrs(); err != nil {
return err
}
pr, pw := io.Pipe()
w.pw = pw
w.opened = true
go w.monitorCancel()
attrs := w.ObjectAttrs
mediaOpts := []googleapi.MediaOption{
googleapi.ChunkSize(w.ChunkSize),
}
if c := attrs.ContentType; c != "" {
mediaOpts = append(mediaOpts, googleapi.ContentType(c))
}
if w.ChunkRetryDeadline != 0 {
mediaOpts = append(mediaOpts, googleapi.ChunkRetryDeadline(w.ChunkRetryDeadline))
}
go func() {
defer close(w.donec)
rawObj := attrs.toRawObject(w.o.bucket)
if w.SendCRC32C {
rawObj.Crc32c = encodeUint32(attrs.CRC32C)
}
if w.MD5 != nil {
rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5)
}
call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj).
Media(pr, mediaOpts...).
Projection("full").
Context(w.ctx).
Name(w.o.object)
if w.ProgressFunc != nil {
call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) })
}
if attrs.KMSKeyName != "" {
call.KmsKeyName(attrs.KMSKeyName)
}
if attrs.PredefinedACL != "" {
call.PredefinedAcl(attrs.PredefinedACL)
}
if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
w.mu.Lock()
w.err = err
w.mu.Unlock()
pr.CloseWithError(err)
return
}
var resp *raw.Object
err := applyConds("NewWriter", w.o.gen, w.o.conds, call)
if err == nil {
if w.o.userProject != "" {
call.UserProject(w.o.userProject)
}
setClientHeader(call.Header())
// The internals that perform call.Do automatically retry both the initial
// call to set up the upload as well as calls to upload individual chunks
// for a resumable upload (as long as the chunk size is non-zero). Hence
// there is no need to add retries here.
// Retry only when the operation is idempotent or the retry policy is RetryAlways.
isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true)
var useRetry bool
if (w.o.retry == nil || w.o.retry.policy == RetryIdempotent) && isIdempotent {
useRetry = true
} else if w.o.retry != nil && w.o.retry.policy == RetryAlways {
useRetry = true
}
if useRetry {
if w.o.retry != nil {
call.WithRetry(w.o.retry.backoff, w.o.retry.shouldRetry)
} else {
call.WithRetry(nil, nil)
}
}
resp, err = call.Do()
}
if err != nil {
w.mu.Lock()
w.err = err
w.mu.Unlock()
pr.CloseWithError(err)
return
}
w.obj = newObject(resp)
}()
return nil
}
// Write appends to w. It implements the io.Writer interface.
//
// Since writes happen asynchronously, Write may return a nil
@ -235,12 +126,7 @@ func (w *Writer) Write(p []byte) (n int, err error) {
return 0, werr
}
if !w.opened {
// gRPC client has been initialized - use gRPC to upload.
if w.o.c.tc != nil {
if err := w.openWriter(); err != nil {
return 0, err
}
} else if err := w.open(); err != nil {
if err := w.openWriter(); err != nil {
return 0, err
}
}
@ -264,11 +150,7 @@ func (w *Writer) Write(p []byte) (n int, err error) {
// can be retrieved by calling Attrs.
func (w *Writer) Close() error {
if !w.opened {
if w.o.c.tc != nil {
if err := w.openWriter(); err != nil {
return err
}
} else if err := w.open(); err != nil {
if err := w.openWriter(); err != nil {
return err
}
}
@ -288,7 +170,12 @@ func (w *Writer) openWriter() (err error) {
if err := w.validateWriteAttrs(); err != nil {
return err
}
if w.o.gen != defaultGen {
return fmt.Errorf("storage: generation not supported on Writer, got %v", w.o.gen)
}
isIdempotent := w.o.conds != nil && (w.o.conds.GenerationMatch >= 0 || w.o.conds.DoesNotExist == true)
opts := makeStorageOpts(isIdempotent, w.o.retry, w.o.userProject)
go w.monitorCancel()
params := &openWriterParams{
ctx: w.ctx,
@ -304,7 +191,7 @@ func (w *Writer) openWriter() (err error) {
progress: w.progress,
setObj: func(o *ObjectAttrs) { w.obj = o },
}
w.pw, err = w.o.c.tc.OpenWriter(params)
w.pw, err = w.o.c.tc.OpenWriter(params, opts...)
if err != nil {
return err
}
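A hedged usage sketch of the lazily opened Writer; bucket/object names and src are placeholders, and a DoesNotExist condition makes the upload idempotent per the check above:

	w := client.Bucket("my-bucket").Object("my-object").If(storage.Conditions{DoesNotExist: true}).NewWriter(ctx)
	w.ChunkSize = 8 * 1024 * 1024 // larger chunks mean fewer resumable-upload round trips
	if _, err := io.Copy(w, src); err != nil {
		// the pipe surfaces the error; Close must still be called
	}
	if err := w.Close(); err != nil {
		// Close reports the final status of the upload
	}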

View file

@ -31,12 +31,12 @@ func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) {
// allow you to get a list of the partitions in the order the endpoints
// will be resolved in.
//
// resolver, err := endpoints.DecodeModel(reader)
//
// partitions := resolver.(endpoints.EnumPartitions).Partitions()
// for _, p := range partitions {
// // ... inspect partitions
// }
func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) {
var opts DecodeModelOptions
opts.Set(optFns...)

View file

@ -2389,6 +2389,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "appmesh.ap-northeast-2.api.aws",
},
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
endpointKey{
Region: "ap-northeast-3",
Variant: dualStackVariant,
}: endpoint{
Hostname: "appmesh.ap-northeast-3.api.aws",
},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@ -2416,6 +2425,15 @@ var awsPartition = partition{
}: endpoint{
Hostname: "appmesh.ap-southeast-2.api.aws",
},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
Region: "ap-southeast-3",
Variant: dualStackVariant,
}: endpoint{
Hostname: "appmesh.ap-southeast-3.api.aws",
},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@ -3243,6 +3261,22 @@ var awsPartition = partition{
}: endpoint{},
},
},
"backupstorage": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-2",
}: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
},
},
"batch": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{},
@ -10445,6 +10479,37 @@ var awsPartition = partition{
}: endpoint{},
},
},
"geo": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-2",
}: endpoint{},
endpointKey{
Region: "us-west-2",
}: endpoint{},
},
},
"glacier": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@ -12473,6 +12538,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@ -29017,6 +29085,12 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
endpointKey{
Region: "us-gov-east-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "lambda.us-gov-east-1.api.aws",
},
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
@ -29026,6 +29100,12 @@ var awsusgovPartition = partition{
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
endpointKey{
Region: "us-gov-west-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "lambda.us-gov-west-1.api.aws",
},
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,

View file

@ -9,7 +9,7 @@
// AWS GovCloud (US) (aws-us-gov).
// .
//
// # Enumerating Regions and Endpoint Metadata
//
// Casting the Resolver returned by DefaultResolver to a EnumPartitions interface
// will allow you to get access to the list of underlying Partitions with the
@ -17,22 +17,22 @@
// resolving to a single partition, or enumerate regions, services, and endpoints
// in the partition.
//
// resolver := endpoints.DefaultResolver()
// partitions := resolver.(endpoints.EnumPartitions).Partitions()
//
// for _, p := range partitions {
// fmt.Println("Regions for", p.ID())
// for id, _ := range p.Regions() {
// fmt.Println("*", id)
// }
//
// fmt.Println("Services for", p.ID())
// for id, _ := range p.Services() {
// fmt.Println("*", id)
// }
// }
//
// # Using Custom Endpoints
//
// The endpoints package also gives you the ability to use your own logic how
// endpoints are resolved. This is a great way to define a custom endpoint
@ -47,20 +47,19 @@
// of Resolver.EndpointFor, converting it to a type that satisfies the
// Resolver interface.
//
// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
// if service == endpoints.S3ServiceID {
// return endpoints.ResolvedEndpoint{
// URL: "s3.custom.endpoint.com",
// SigningRegion: "custom-signing-region",
// }, nil
// }
// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
// }
//
// sess := session.Must(session.NewSession(&aws.Config{
// Region: aws.String("us-west-2"),
// EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
// }))
package endpoints

View file

@ -353,10 +353,12 @@ type EnumPartitions interface {
// as the second parameter.
//
// This example shows how to get the regions for DynamoDB in the AWS partition.
//
// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
//
// This is equivalent to using the partition directly.
//
// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
for _, p := range ps {
if p.ID() != partitionID {
@ -423,8 +425,8 @@ func (p Partition) ID() string { return p.id }
// of new regions and services expansions.
//
// Errors that can be returned.
// - UnknownServiceError
// - UnknownEndpointError
func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
return p.p.EndpointFor(service, region, opts...)
}
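A hedged sketch of resolving an endpoint and distinguishing the two error types listed above:

	p := endpoints.AwsPartition()
	ep, err := p.EndpointFor("s3", "us-west-2")
	switch err.(type) {
	case nil:
		fmt.Println(ep.URL, ep.SigningRegion)
	case endpoints.UnknownServiceError:
		// the service ID is not in this partition's model
	case endpoints.UnknownEndpointError:
		// the service is known but has no endpoint for this region
	}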

View file

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.44.70"
const SDKVersion = "1.44.76"

File diff suppressed because it is too large Load diff

View file

@ -39,11 +39,11 @@ func NormalizeBucketLocation(loc string) string {
// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
// for more information on the values that can be returned.
//
// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{
// Bucket: aws.String(bucket),
// })
// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
// err := req.Send()
var NormalizeBucketLocationHandler = request.NamedHandler{
Name: "awssdk.s3.NormalizeBucketLocation",
Fn: func(req *request.Request) {
@ -65,12 +65,12 @@ var NormalizeBucketLocationHandler = request.NamedHandler{
// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
// for more information on the values that can be returned.
//
// result, err := svc.GetBucketLocationWithContext(ctx,
// &s3.GetBucketLocationInput{
// Bucket: aws.String(bucket),
// },
// s3.WithNormalizeBucketLocation,
// )
func WithNormalizeBucketLocation(r *request.Request) {
r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
}

View file

@ -8,7 +8,7 @@
// See s3 package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/
//
// # Using the Client
//
// To contact Amazon Simple Storage Service with the SDK use the New function to create
// a new service client. With that client you can make API requests to the service.

View file

@ -6,99 +6,99 @@
// for optimizations if the Body satisfies that type. Once the Uploader instance
// is created you can call Upload concurrently from multiple goroutines safely.
//
// // The session the S3 Uploader will use
// sess := session.Must(session.NewSession())
//
// // Create an uploader with the session and default options
// uploader := s3manager.NewUploader(sess)
//
// f, err := os.Open(filename)
// if err != nil {
// return fmt.Errorf("failed to open file %q, %v", filename, err)
// }
//
// // Upload the file to S3.
// result, err := uploader.Upload(&s3manager.UploadInput{
// Bucket: aws.String(myBucket),
// Key: aws.String(myString),
// Body: f,
// })
// if err != nil {
// return fmt.Errorf("failed to upload file, %v", err)
// }
// fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location))
//
// See the s3manager package's Uploader type documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader
//
// # Download Manager
//
// The s3manager package's Downloader provides concurrently downloading of Objects
// from S3. The Downloader will write S3 Object content with an io.WriterAt.
// Once the Downloader instance is created you can call Download concurrently from
// multiple goroutines safely.
//
// // The session the S3 Downloader will use
// sess := session.Must(session.NewSession())
//
// // Create a downloader with the session and default options
// downloader := s3manager.NewDownloader(sess)
//
// // Create a file to write the S3 Object contents to.
// f, err := os.Create(filename)
// if err != nil {
// return fmt.Errorf("failed to create file %q, %v", filename, err)
// }
//
// // Write the contents of S3 Object to the file
// n, err := downloader.Download(f, &s3.GetObjectInput{
// Bucket: aws.String(myBucket),
// Key: aws.String(myString),
// })
// if err != nil {
// return fmt.Errorf("failed to download file, %v", err)
// }
// fmt.Printf("file downloaded, %d bytes\n", n)
//
// See the s3manager package's Downloader type documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader
//
// # Automatic URI cleaning
//
// Interacting with objects whose keys contain adjacent slashes (e.g. bucketname/foo//bar/objectname)
// requires setting DisableRestProtocolURICleaning to true in the aws.Config struct
// used by the service client.
//
// svc := s3.New(sess, &aws.Config{
// DisableRestProtocolURICleaning: aws.Bool(true),
// })
// out, err := svc.GetObject(&s3.GetObjectInput {
// Bucket: aws.String("bucketname"),
// Key: aws.String("//foo//bar//moo"),
// })
//
// # Get Bucket Region
//
// GetBucketRegion will attempt to get the region for a bucket using a region
// hint to determine which AWS partition to perform the query on. Use this utility
// to determine the region a bucket is in.
//
// sess := session.Must(session.NewSession())
//
// bucket := "my-bucket"
// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
// if err != nil {
// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region not found\n", bucket)
// }
// return err
// }
// fmt.Printf("Bucket %s is in %s region\n", bucket, region)
//
// See the s3manager package's GetBucketRegion function documentation for more information
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion
//
// # S3 Crypto Client
//
// The s3crypto package provides the tools to upload and download encrypted
// content from S3. The Encryption and Decryption clients can be used concurrently
@ -106,5 +106,4 @@
//
// See the s3crypto package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/
//
package s3

View file

@ -52,9 +52,8 @@ func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) {
// outpost access-point resource.
//
// Supported Outpost AccessPoint ARN format:
// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName}
// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint
func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.OutpostAccessPointARN, error) {
// outpost accesspoint arn is only valid if service is s3-outposts
if a.Service != "s3-outposts" {

View file

@ -37,7 +37,6 @@ type accessPointEndpointBuilder arn.AccessPointARN
// - example : myaccesspoint-012345678901.s3-accesspoint.us-west-2.amazonaws.com
//
// Access Point Endpoint requests are signed using "s3" as signing name.
//
func (a accessPointEndpointBuilder) build(req *request.Request) error {
resolveService := arn.AccessPointARN(a).Service
resolveRegion := arn.AccessPointARN(a).Region
@ -92,7 +91,6 @@ type s3ObjectLambdaAccessPointEndpointBuilder arn.S3ObjectLambdaAccessPointARN
// - example : myaccesspoint-012345678901.s3-object-lambda.us-west-2.amazonaws.com
//
// Access Point Endpoint requests are signed using "s3-object-lambda" as signing name.
//
func (a s3ObjectLambdaAccessPointEndpointBuilder) build(req *request.Request) error {
resolveRegion := arn.S3ObjectLambdaAccessPointARN(a).Region
@ -147,7 +145,6 @@ type outpostAccessPointEndpointBuilder arn.OutpostAccessPointARN
// - example : myaccesspoint-012345678901.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com
//
// Outpost AccessPoint Endpoint request are signed using "s3-outposts" as signing name.
//
func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error {
resolveRegion := o.Region
resolveService := o.Service

View file

@ -23,37 +23,37 @@ import (
// can be stubbed out for unit testing your code with the SDK without needing
// to inject custom request handlers into the SDK's request pipeline.
//
// // myFunc uses an SDK service client to make a request to
// // Amazon Simple Storage Service.
// func myFunc(svc s3iface.S3API) bool {
// // Make svc.AbortMultipartUpload request
// }
//
// func main() {
// sess := session.New()
// svc := s3.New(sess)
//
// myFunc(svc)
// }
//
// In your _test.go file:
//
// // Define a mock struct to be used in your unit tests of myFunc.
// type mockS3Client struct {
// s3iface.S3API
// }
// func (m *mockS3Client) AbortMultipartUpload(input *s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) {
// // mock response/functionality
// }
//
// func TestMyFunc(t *testing.T) {
// // Setup Test
// mockSvc := &mockS3Client{}
//
// myfunc(mockSvc)
//
// // Verify myFunc's functionality
// }
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,

View file

@ -112,6 +112,7 @@ type BatchDeleteIterator interface {
// iterate through a list of objects and delete the objects.
//
// Example:
//
// iter := &s3manager.DeleteListIterator{
// Client: svc,
// Input: &s3.ListObjectsInput{
@ -203,6 +204,7 @@ type BatchDelete struct {
// objects.
//
// Example:
//
// batcher := s3manager.NewBatchDeleteWithClient(client, size)
//
// objects := []BatchDeleteObject{
@ -236,6 +238,7 @@ func NewBatchDeleteWithClient(client s3iface.S3API, options ...func(*BatchDelete
// objects.
//
// Example:
//
// batcher := s3manager.NewBatchDelete(sess, size)
//
// objects := []BatchDeleteObject{

View file

@ -24,30 +24,30 @@ import (
// For example to get the region of a bucket which exists in "eu-central-1"
// you could provide a region hint of "us-west-2".
//
// sess := session.Must(session.NewSession())
//
// bucket := "my-bucket"
// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
// if err != nil {
// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region not found\n", bucket)
// }
// return err
// }
// fmt.Printf("Bucket %s is in %s region\n", bucket, region)
//
// By default the request will be made to the Amazon S3 endpoint using the Path
// style addressing.
//
// s3.us-west-2.amazonaws.com/bucketname
//
// This is not compatible with Amazon S3's FIPS endpoints. To override this
// behavior to use Virtual Host style addressing, provide a functional option
// that will set the Request's Config.S3ForcePathStyle to aws.Bool(false).
//
// region, err := s3manager.GetBucketRegion(ctx, sess, "bucketname", "us-west-2", func(r *request.Request) {
// r.S3ForcePathStyle = aws.Bool(false)
// })
//
// To configure the GetBucketRegion to make a request via the Amazon
// S3 FIPS endpoints directly when a FIPS region name is not available, (e.g.
@ -55,11 +55,11 @@ import (
// utility is called with. The hint region will be ignored if an endpoint URL
// is configured on the session or client.
//
// sess, err := session.NewSession(&aws.Config{
// Endpoint: aws.String("https://s3-fips.us-west-2.amazonaws.com"),
// })
//
// region, err := s3manager.GetBucketRegion(context.Background(), sess, "bucketname", "")
func GetBucketRegion(ctx aws.Context, c client.ConfigProvider, bucket, regionHint string, opts ...request.Option) (string, error) {
var cfg aws.Config
if len(regionHint) != 0 {
@ -78,15 +78,15 @@ const bucketRegionHeader = "X-Amz-Bucket-Region"
// By default the request will be made to the Amazon S3 endpoint using the Path
// style addressing.
//
// s3.us-west-2.amazonaws.com/bucketname
//
// This is not compatible with Amazon S3's FIPS endpoints. To override this
// behavior to use Virtual Host style addressing, provide a functional option
// that will set the Request's Config.S3ForcePathStyle to aws.Bool(false).
//
// region, err := s3manager.GetBucketRegionWithClient(ctx, client, "bucketname", func(r *request.Request) {
// r.S3ForcePathStyle = aws.Bool(false)
// })
//
// To configure the GetBucketRegion to make a request via the Amazon
// S3 FIPS endpoints directly when a FIPS region name is not available, (e.g.
@ -94,11 +94,11 @@ const bucketRegionHeader = "X-Amz-Bucket-Region"
// utility is called with. The hint region will be ignored if an endpoint URL
// is configured on the session or client.
//
// region, err := s3manager.GetBucketRegionWithClient(context.Background(),
// s3.New(sess, &aws.Config{
// Endpoint: aws.String("https://s3-fips.us-west-2.amazonaws.com"),
// }),
// "bucketname")
//
// See GetBucketRegion for more information.
func GetBucketRegionWithClient(ctx aws.Context, svc s3iface.S3API, bucket string, opts ...request.Option) (string, error) {

View file

@ -86,16 +86,17 @@ func WithDownloaderRequestOptions(opts ...request.Option) func(*Downloader) {
// interface.
//
// Example:
//
// // The session the S3 Downloader will use
// sess := session.Must(session.NewSession())
//
// // Create a downloader with the session and default options
// downloader := s3manager.NewDownloader(sess)
//
// // Create a downloader with the session and custom options
// downloader := s3manager.NewDownloader(sess, func(d *s3manager.Downloader) {
// d.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewDownloader(c client.ConfigProvider, options ...func(*Downloader)) *Downloader {
return newDownloader(s3.New(c), options...)
}
@ -120,19 +121,20 @@ func newDownloader(client s3iface.S3API, options ...func(*Downloader)) *Download
// to make S3 API calls.
//
// Example:
//
// // The session the S3 Downloader will use
// sess := session.Must(session.NewSession())
//
// // The S3 client the S3 Downloader will use
// s3Svc := s3.New(sess)
//
// // Create a downloader with the s3 client and default options
// downloader := s3manager.NewDownloaderWithClient(s3Svc)
//
// // Create a downloader with the s3 client and custom options
// downloader := s3manager.NewDownloaderWithClient(s3Svc, func(d *s3manager.Downloader) {
// d.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewDownloaderWithClient(svc s3iface.S3API, options ...func(*Downloader)) *Downloader {
return newDownloader(svc, options...)
}
@ -223,6 +225,7 @@ func (d Downloader) DownloadWithContext(ctx aws.Context, w io.WriterAt, input *s
// to the io.WriterAt specificed in the iterator.
//
// Example:
//
// svc := s3manager.NewDownloader(session)
//
// fooFile, err := os.Open("/tmp/foo.file")
@ -464,7 +467,11 @@ func (d *downloader) tryDownloadChunk(in *s3.GetObjectInput, w io.Writer) (int64
}
d.setTotalBytes(resp) // Set total if not yet set.
n, err := io.Copy(w, resp.Body)
var src io.Reader = resp.Body
if d.cfg.BufferProvider != nil {
src = &suppressWriterAt{suppressed: src}
}
n, err := io.Copy(w, src)
resp.Body.Close()
if err != nil {
return n, &errReadingBody{err: err}

View file

@ -40,18 +40,17 @@ const DefaultUploadConcurrency = 5
//
// Example:
//
// u := s3manager.NewUploader(opts)
// output, err := u.upload(input)
// if err != nil {
// if multierr, ok := err.(s3manager.MultiUploadFailure); ok {
// // Process error and its associated uploadID
// fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID())
// } else {
// // Process error generically
// fmt.Println("Error:", err.Error())
// }
// }
type MultiUploadFailure interface {
awserr.Error
@ -77,7 +76,7 @@ type multiUploadError struct {
// Error returns the string representation of the error.
//
// # See apierr.BaseError ErrorWithExtra for output format
//
// Satisfies the error interface.
func (m multiUploadError) Error() string {
@ -187,16 +186,17 @@ type Uploader struct {
// satisfies the client.ConfigProvider interface.
//
// Example:
//
// // The session the S3 Uploader will use
// sess := session.Must(session.NewSession())
//
// // Create an uploader with the session and default options
// uploader := s3manager.NewUploader(sess)
//
// // Create an uploader with the session and custom options
// uploader := s3manager.NewUploader(session, func(u *s3manager.Uploader) {
// u.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewUploader(c client.ConfigProvider, options ...func(*Uploader)) *Uploader {
return newUploader(s3.New(c), options...)
}
@ -225,19 +225,20 @@ func newUploader(client s3iface.S3API, options ...func(*Uploader)) *Uploader {
// a S3 service client to make S3 API calls.
//
// Example:
//
// // The session the S3 Uploader will use
// sess := session.Must(session.NewSession())
//
// // S3 service client the Upload manager will use.
// s3Svc := s3.New(sess)
//
// // Create an uploader with S3 client and default options
// uploader := s3manager.NewUploaderWithClient(s3Svc)
//
// // Create an uploader with S3 client and custom options
// uploader := s3manager.NewUploaderWithClient(s3Svc, func(u *s3manager.Uploader) {
// u.PartSize = 64 * 1024 * 1024 // 64MB per part
// })
func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploader {
return newUploader(svc, options...)
}
@ -256,21 +257,22 @@ func NewUploaderWithClient(svc s3iface.S3API, options ...func(*Uploader)) *Uploa
// It is safe to call this method concurrently across goroutines.
//
// Example:
//
// // Upload input parameters
// upParams := &s3manager.UploadInput{
// Bucket: &bucketName,
// Key: &keyName,
// Body: file,
// }
//
// // Perform an upload.
// result, err := uploader.Upload(upParams)
//
//	// Perform upload with options different than those in the Uploader.
// result, err := uploader.Upload(upParams, func(u *s3manager.Uploader) {
// u.PartSize = 10 * 1024 * 1024 // 10MB part size
// u.LeavePartsOnError = true // Don't delete the parts if the upload fails.
// })
func (u Uploader) Upload(input *UploadInput, options ...func(*Uploader)) (*UploadOutput, error) {
return u.UploadWithContext(aws.BackgroundContext(), input, options...)
}
@ -310,6 +312,7 @@ func (u Uploader) UploadWithContext(ctx aws.Context, input *UploadInput, opts ..
// allows for custom defined functionality.
//
// Example:
//
// svc:= s3manager.NewUploader(sess)
//
// objects := []BatchUploadObject{

View file

@ -73,3 +73,11 @@ func (p *PooledBufferedReadFromProvider) GetReadFrom(writer io.Writer) (r Writer
}
return r, cleanup
}
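// suppressWriterAt exposes only Read on the wrapped reader; this appears intended
// to keep io.Copy in tryDownloadChunk from taking a WriterTo/ReaderFrom fast path
// that would bypass the pooled buffer supplied by the Downloader's BufferProvider.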
type suppressWriterAt struct {
suppressed io.Reader
}
func (s *suppressWriterAt) Read(p []byte) (n int, err error) {
return s.suppressed.Read(p)
}

View file

@ -39,13 +39,14 @@ const (
// aws.Config parameter to add your extra config.
//
// Example:
//
// mySession := session.Must(session.NewSession())
//
// // Create a S3 client from just a session.
// svc := s3.New(mySession)
//
// // Create a S3 client with additional configuration
// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
c := p.ClientConfig(EndpointsID, cfgs...)
if c.SigningNameDerived || len(c.SigningName) == 0 {

View file

@ -29,14 +29,13 @@ const opGetRoleCredentials = "GetRoleCredentials"
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the GetRoleCredentialsRequest method.
// req, resp := client.GetRoleCredentialsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials
func (c *SSO) GetRoleCredentialsRequest(input *GetRoleCredentialsInput) (req *request.Request, output *GetRoleCredentialsOutput) {
@ -69,20 +68,21 @@ func (c *SSO) GetRoleCredentialsRequest(input *GetRoleCredentialsInput) (req *re
// API operation GetRoleCredentials for usage and error information.
//
// Returned Error Types:
//
// - InvalidRequestException
// Indicates that a problem occurred with the input to the request. For example,
// a required parameter might be missing or out of range.
//
// - UnauthorizedException
// Indicates that the request is not authorized. This can happen due to an invalid
// access token in the request.
//
// - TooManyRequestsException
// Indicates that the request is being made too frequently and is more than
// what the server can handle.
//
// - ResourceNotFoundException
// The specified resource doesn't exist.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials
func (c *SSO) GetRoleCredentials(input *GetRoleCredentialsInput) (*GetRoleCredentialsOutput, error) {
@ -122,14 +122,13 @@ const opListAccountRoles = "ListAccountRoles"
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the ListAccountRolesRequest method.
// req, resp := client.ListAccountRolesRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles
func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *request.Request, output *ListAccountRolesOutput) {
@ -157,7 +156,8 @@ func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *reques
// ListAccountRoles API operation for AWS Single Sign-On.
//
// Lists all roles that are assigned to the user for a given Amazon Web Services
// account.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@ -167,20 +167,21 @@ func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *reques
// API operation ListAccountRoles for usage and error information.
//
// Returned Error Types:
//
// - InvalidRequestException
// Indicates that a problem occurred with the input to the request. For example,
// a required parameter might be missing or out of range.
//
// - UnauthorizedException
// Indicates that the request is not authorized. This can happen due to an invalid
// access token in the request.
//
// - TooManyRequestsException
// Indicates that the request is being made too frequently and is more than
// what the server can handle.
//
// - ResourceNotFoundException
// The specified resource doesn't exist.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles
func (c *SSO) ListAccountRoles(input *ListAccountRolesInput) (*ListAccountRolesOutput, error) {
@ -212,15 +213,14 @@ func (c *SSO) ListAccountRolesWithContext(ctx aws.Context, input *ListAccountRol
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a ListAccountRoles operation.
// pageNum := 0
// err := client.ListAccountRolesPages(params,
// func(page *sso.ListAccountRolesOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
func (c *SSO) ListAccountRolesPages(input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool) error {
return c.ListAccountRolesPagesWithContext(aws.BackgroundContext(), input, fn)
}
@ -272,14 +272,13 @@ const opListAccounts = "ListAccounts"
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
//
// // Example sending a request using the ListAccountsRequest method.
// req, resp := client.ListAccountsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts
func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Request, output *ListAccountsOutput) {
@ -307,10 +306,11 @@ func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Reques
// ListAccounts API operation for AWS Single Sign-On.
//
// Lists all Amazon Web Services accounts assigned to the user. These Amazon
// Web Services accounts are assigned by the administrator of the account. For
// more information, see Assign User Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers)
// in the Amazon Web Services SSO User Guide. This operation returns a paginated
// response.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@ -320,20 +320,21 @@ func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Reques
// API operation ListAccounts for usage and error information.
//
// Returned Error Types:
//
// - InvalidRequestException
// Indicates that a problem occurred with the input to the request. For example,
// a required parameter might be missing or out of range.
//
// - UnauthorizedException
// Indicates that the request is not authorized. This can happen due to an invalid
// access token in the request.
//
// - TooManyRequestsException
// Indicates that the request is being made too frequently and is more than
// what the server can handle.
//
// - ResourceNotFoundException
// The specified resource doesn't exist.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts
func (c *SSO) ListAccounts(input *ListAccountsInput) (*ListAccountsOutput, error) {
@ -365,15 +366,14 @@ func (c *SSO) ListAccountsWithContext(ctx aws.Context, input *ListAccountsInput,
//
// Note: This operation can generate multiple requests to a service.
//
// // Example iterating over at most 3 pages of a ListAccounts operation.
// pageNum := 0
// err := client.ListAccountsPages(params,
// func(page *sso.ListAccountsOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
//
// // Example iterating over at most 3 pages of a ListAccounts operation.
// pageNum := 0
// err := client.ListAccountsPages(params,
// func(page *sso.ListAccountsOutput, lastPage bool) bool {
// pageNum++
// fmt.Println(page)
// return pageNum <= 3
// })
func (c *SSO) ListAccountsPages(input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool) error {
return c.ListAccountsPagesWithContext(aws.BackgroundContext(), input, fn)
}
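
A minimal sketch of driving the paginator shown above, assuming the SSO access token was already obtained via the SSO OIDC CreateToken flow; the region and token values are placeholders, and ListAccountRolesPages follows the same callback shape:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sso"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := sso.New(sess)

	// Placeholder token; in practice it comes from the SSO OIDC CreateToken call.
	accessToken := "PLACEHOLDER-SSO-ACCESS-TOKEN"

	// Walk every page of accounts assigned to the user; returning true keeps paging.
	err := svc.ListAccountsPages(&sso.ListAccountsInput{
		AccessToken: aws.String(accessToken),
	}, func(page *sso.ListAccountsOutput, lastPage bool) bool {
		for _, acct := range page.AccountList {
			fmt.Println(aws.StringValue(acct.AccountId), aws.StringValue(acct.AccountName))
		}
		return true
	})
	if err != nil {
		log.Fatal(err)
	}
}
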
@ -425,14 +425,13 @@ const opLogout = "Logout"
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
// // Example sending a request using the LogoutRequest method.
// req, resp := client.LogoutRequest(params)
//
// // Example sending a request using the LogoutRequest method.
// req, resp := client.LogoutRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout
func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *LogoutOutput) {
@ -455,7 +454,21 @@ func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *L
// Logout API operation for AWS Single Sign-On.
//
// Removes the client- and server-side session that is associated with the user.
// Removes the locally stored SSO tokens from the client-side cache and sends
// an API call to the Amazon Web Services SSO service to invalidate the corresponding
// server-side Amazon Web Services SSO sign in session.
//
// If a user uses Amazon Web Services SSO to access the AWS CLI, the user's
// Amazon Web Services SSO sign in session is used to obtain an IAM session,
// as specified in the corresponding Amazon Web Services SSO permission set.
// More specifically, Amazon Web Services SSO assumes an IAM role in the target
// account on behalf of the user, and the corresponding temporary Amazon Web
// Services credentials are returned to the client.
//
// After user logout, any existing IAM role sessions that were created by using
// Amazon Web Services SSO permission sets continue based on the duration configured
// in the permission set. For more information, see User authentications (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html)
// in the Amazon Web Services SSO User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@ -465,17 +478,18 @@ func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *L
// API operation Logout for usage and error information.
//
// Returned Error Types:
// * InvalidRequestException
// Indicates that a problem occurred with the input to the request. For example,
// a required parameter might be missing or out of range.
//
// * UnauthorizedException
// Indicates that the request is not authorized. This can happen due to an invalid
// access token in the request.
// - InvalidRequestException
// Indicates that a problem occurred with the input to the request. For example,
// a required parameter might be missing or out of range.
//
// * TooManyRequestsException
// Indicates that the request is being made too frequently and is more than
// what the server can handle.
// - UnauthorizedException
// Indicates that the request is not authorized. This can happen due to an invalid
// access token in the request.
//
// - TooManyRequestsException
// Indicates that the request is being made too frequently and is more than
// what the server can handle.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout
func (c *SSO) Logout(input *LogoutInput) (*LogoutOutput, error) {
@ -499,17 +513,20 @@ func (c *SSO) LogoutWithContext(ctx aws.Context, input *LogoutInput, opts ...req
return out, req.Send()
}
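
A rough sketch of the Logout call described above: it discards the cached client-side token and invalidates the server-side sign-in session, while IAM role sessions already vended from permission sets keep running for their configured duration. The region and access token are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sso"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := sso.New(sess)

	// Invalidate the SSO sign-in session tied to this access token (placeholder value).
	if _, err := svc.Logout(&sso.LogoutInput{
		AccessToken: aws.String("PLACEHOLDER-SSO-ACCESS-TOKEN"),
	}); err != nil {
		log.Fatal(err)
	}
}
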
// Provides information about your AWS account.
// Provides information about your Amazon Web Services account.
type AccountInfo struct {
_ struct{} `type:"structure"`
// The identifier of the AWS account that is assigned to the user.
// The identifier of the Amazon Web Services account that is assigned to the
// user.
AccountId *string `locationName:"accountId" type:"string"`
// The display name of the AWS account that is assigned to the user.
// The display name of the Amazon Web Services account that is assigned to the
// user.
AccountName *string `locationName:"accountName" type:"string"`
// The email address of the AWS account that is assigned to the user.
// The email address of the Amazon Web Services account that is assigned to
// the user.
EmailAddress *string `locationName:"emailAddress" min:"1" type:"string"`
}
@ -554,7 +571,7 @@ type GetRoleCredentialsInput struct {
// The token issued by the CreateToken API call. For more information, see CreateToken
// (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
// in the AWS SSO OIDC API Reference Guide.
// in the Amazon Web Services SSO OIDC API Reference Guide.
//
// AccessToken is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by GetRoleCredentialsInput's
@ -563,7 +580,8 @@ type GetRoleCredentialsInput struct {
// AccessToken is a required field
AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"`
// The identifier for the AWS account that is assigned to the user.
// The identifier for the Amazon Web Services account that is assigned to the
// user.
//
// AccountId is a required field
AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"`
@ -730,7 +748,7 @@ type ListAccountRolesInput struct {
// The token issued by the CreateToken API call. For more information, see CreateToken
// (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
// in the AWS SSO OIDC API Reference Guide.
// in the Amazon Web Services SSO OIDC API Reference Guide.
//
// AccessToken is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by ListAccountRolesInput's
@ -739,7 +757,8 @@ type ListAccountRolesInput struct {
// AccessToken is a required field
AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"`
// The identifier for the AWS account that is assigned to the user.
// The identifier for the Amazon Web Services account that is assigned to the
// user.
//
// AccountId is a required field
AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"`
@ -859,7 +878,7 @@ type ListAccountsInput struct {
// The token issued by the CreateToken API call. For more information, see CreateToken
// (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
// in the AWS SSO OIDC API Reference Guide.
// in the Amazon Web Services SSO OIDC API Reference Guide.
//
// AccessToken is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by ListAccountsInput's
@ -974,7 +993,7 @@ type LogoutInput struct {
// The token issued by the CreateToken API call. For more information, see CreateToken
// (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
// in the AWS SSO OIDC API Reference Guide.
// in the Amazon Web Services SSO OIDC API Reference Guide.
//
// AccessToken is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by LogoutInput's
@ -1113,17 +1132,18 @@ type RoleCredentials struct {
_ struct{} `type:"structure"`
// The identifier used for the temporary security credentials. For more information,
// see Using Temporary Security Credentials to Request Access to AWS Resources
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
// in the AWS IAM User Guide.
// see Using Temporary Security Credentials to Request Access to Amazon Web
// Services Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
// in the Amazon Web Services IAM User Guide.
AccessKeyId *string `locationName:"accessKeyId" type:"string"`
// The date on which temporary security credentials expire.
Expiration *int64 `locationName:"expiration" type:"long"`
// The key that is used to sign the request. For more information, see Using
// Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
// in the AWS IAM User Guide.
// Temporary Security Credentials to Request Access to Amazon Web Services Resources
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
// in the Amazon Web Services IAM User Guide.
//
// SecretAccessKey is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by RoleCredentials's
@ -1131,8 +1151,9 @@ type RoleCredentials struct {
SecretAccessKey *string `locationName:"secretAccessKey" type:"string" sensitive:"true"`
// The token used for temporary credentials. For more information, see Using
// Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
// in the AWS IAM User Guide.
// Temporary Security Credentials to Request Access to Amazon Web Services Resources
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
// in the Amazon Web Services IAM User Guide.
//
// SessionToken is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by RoleCredentials's
@ -1186,7 +1207,7 @@ func (s *RoleCredentials) SetSessionToken(v string) *RoleCredentials {
type RoleInfo struct {
_ struct{} `type:"structure"`
// The identifier of the AWS account assigned to the user.
// The identifier of the Amazon Web Services account assigned to the user.
AccountId *string `locationName:"accountId" type:"string"`
// The friendly name of the role that is assigned to the user.
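
Pulling the pieces of this file together: GetRoleCredentials takes the access token plus an AccountId/RoleName pair (as surfaced by the listing calls) and returns the temporary RoleCredentials fields documented above. A minimal sketch; the token, account ID, role name, and region are placeholder assumptions:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sso"
)

func main() {
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := sso.New(sess)

	out, err := svc.GetRoleCredentials(&sso.GetRoleCredentialsInput{
		AccessToken: aws.String("PLACEHOLDER-SSO-ACCESS-TOKEN"), // placeholder
		AccountId:   aws.String("123456789012"),                 // placeholder account
		RoleName:    aws.String("ReadOnly"),                     // placeholder role
	})
	if err != nil {
		log.Fatal(err)
	}

	// The result mirrors the RoleCredentials fields above; Expiration is an
	// epoch timestamp carried as an int64.
	creds := out.RoleCredentials
	fmt.Println(aws.StringValue(creds.AccessKeyId), aws.Int64Value(creds.Expiration))
}
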

View file

@ -3,30 +3,32 @@
// Package sso provides the client and types for making API
// requests to AWS Single Sign-On.
//
// AWS Single Sign-On Portal is a web service that makes it easy for you to
// assign user access to AWS SSO resources such as the user portal. Users can
// get AWS account applications and roles assigned to them and get federated
// into the application.
// Amazon Web Services Single Sign On Portal is a web service that makes it
// easy for you to assign user access to Amazon Web Services SSO resources such
// as the AWS access portal. Users can get Amazon Web Services account applications
// and roles assigned to them and get federated into the application.
//
// For general information about AWS SSO, see What is AWS Single Sign-On? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html)
// in the AWS SSO User Guide.
// Although Amazon Web Services Single Sign-On was renamed, the sso and identitystore
// API namespaces will continue to retain their original name for backward compatibility
// purposes. For more information, see Amazon Web Services SSO rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed).
//
// This API reference guide describes the AWS SSO Portal operations that you
// can call programmatically and includes detailed information on data types
// and errors.
// This API reference guide describes the Amazon Web Services SSO Portal operations
// that you can call programmatically and includes detailed information on data
// types and errors.
//
// AWS provides SDKs that consist of libraries and sample code for various programming
// languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs
// provide a convenient way to create programmatic access to AWS SSO and other
// AWS services. For more information about the AWS SDKs, including how to download
// and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/).
// Amazon Web Services provides SDKs that consist of libraries and sample code
// for various programming languages and platforms, such as Java, Ruby, .Net,
// iOS, or Android. The SDKs provide a convenient way to create programmatic
// access to Amazon Web Services SSO and other Amazon Web Services services.
// For more information about the Amazon Web Services SDKs, including how to
// download and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/).
//
// See https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10 for more information on this service.
//
// See sso package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/
//
// Using the Client
// # Using the Client
//
// To contact AWS Single Sign-On with the SDK use the New function to create
// a new service client. With that client you can make API requests to the service.

View file

@ -40,13 +40,14 @@ const (
// aws.Config parameter to add your extra config.
//
// Example:
// mySession := session.Must(session.NewSession())
//
// // Create a SSO client from just a session.
// svc := sso.New(mySession)
// mySession := session.Must(session.NewSession())
//
// // Create a SSO client with additional configuration
// svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
// // Create a SSO client from just a session.
// svc := sso.New(mySession)
//
// // Create a SSO client with additional configuration
// svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSO {
c := p.ClientConfig(EndpointsID, cfgs...)
if c.SigningNameDerived || len(c.SigningName) == 0 {

View file

@ -23,37 +23,37 @@ import (
// can be stubbed out for unit testing your code with the SDK without needing
// to inject custom request handlers into the SDK's request pipeline.
//
// // myFunc uses an SDK service client to make a request to
// // AWS Single Sign-On.
// func myFunc(svc ssoiface.SSOAPI) bool {
// // Make svc.GetRoleCredentials request
// }
// // myFunc uses an SDK service client to make a request to
// // AWS Single Sign-On.
// func myFunc(svc ssoiface.SSOAPI) bool {
// // Make svc.GetRoleCredentials request
// }
//
// func main() {
// sess := session.New()
// svc := sso.New(sess)
// func main() {
// sess := session.New()
// svc := sso.New(sess)
//
// myFunc(svc)
// }
// myFunc(svc)
// }
//
// In your _test.go file:
//
// // Define a mock struct to be used in your unit tests of myFunc.
// type mockSSOClient struct {
// ssoiface.SSOAPI
// }
// func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) {
// // mock response/functionality
// }
// // Define a mock struct to be used in your unit tests of myFunc.
// type mockSSOClient struct {
// ssoiface.SSOAPI
// }
// func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) {
// // mock response/functionality
// }
//
// func TestMyFunc(t *testing.T) {
// // Setup Test
// mockSvc := &mockSSOClient{}
// func TestMyFunc(t *testing.T) {
// // Setup Test
// mockSvc := &mockSSOClient{}
//
// myfunc(mockSvc)
// myfunc(mockSvc)
//
// // Verify myFunc's functionality
// }
// // Verify myFunc's functionality
// }
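//
//	// A sketch filling in the bodies elided above; the fake access key is an
//	// assumed placeholder and myFunc is the function declared earlier in this
//	// comment.
//	func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) {
//		return &sso.GetRoleCredentialsOutput{
//			RoleCredentials: &sso.RoleCredentials{AccessKeyId: aws.String("AKIAFAKE")},
//		}, nil
//	}
//
//	func TestMyFunc(t *testing.T) {
//		mockSvc := &mockSSOClient{}
//		if !myFunc(mockSvc) {
//			t.Fatal("myFunc returned false with the mocked client")
//		}
//	}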
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,

View file

@ -28,14 +28,13 @@ const opAssumeRole = "AssumeRole"
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
// // Example sending a request using the AssumeRoleRequest method.
// req, resp := client.AssumeRoleRequest(params)
//
// // Example sending a request using the AssumeRoleRequest method.
// req, resp := client.AssumeRoleRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) {
@ -66,7 +65,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
// in the IAM User Guide.
//
// Permissions
// # Permissions
//
// The temporary security credentials created by AssumeRole can be used to make
// API calls to any Amazon Web Services service with the following exception:
@ -105,10 +104,10 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
// To allow a user to assume a role in the same account, you can do either of
// the following:
//
// * Attach a policy to the user that allows the user to call AssumeRole
// (as long as the role's trust policy trusts the account).
// - Attach a policy to the user that allows the user to call AssumeRole
// (as long as the role's trust policy trusts the account).
//
// * Add the user as a principal directly in the role's trust policy.
// - Add the user as a principal directly in the role's trust policy.
//
// You can do either because the role's trust policy acts as an IAM resource-based
// policy. When a resource-based policy grants access to a principal in the
@ -116,7 +115,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
// about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
// in the IAM User Guide.
//
// Tags
// # Tags
//
// (Optional) You can pass tag key-value pairs to your session. These tags are
// called session tags. For more information about session tags, see Passing
@ -134,7 +133,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
// in the IAM User Guide.
//
// Using MFA with AssumeRole
// # Using MFA with AssumeRole
//
// (Optional) You can include multi-factor authentication (MFA) information
// when you call AssumeRole. This is useful for cross-account scenarios to ensure
@ -163,35 +162,36 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
// API operation AssumeRole for usage and error information.
//
// Returned Error Codes:
// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
// The request was rejected because the policy document was malformed. The error
// message describes the specific error.
//
// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
// The request was rejected because the total packed size of the session policies
// and session tags combined was too large. An Amazon Web Services conversion
// compresses the session policy document, session policy ARNs, and session
// tags into a packed binary format that has a separate limit. The error message
// indicates by percentage how close the policies and tags are to the upper
// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
// in the IAM User Guide.
// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
// The request was rejected because the policy document was malformed. The error
// message describes the specific error.
//
// You could receive this error even though you meet other defined session policy
// and session tag limits. For more information, see IAM and STS Entity Character
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
// in the IAM User Guide.
// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
// The request was rejected because the total packed size of the session policies
// and session tags combined was too large. An Amazon Web Services conversion
// compresses the session policy document, session policy ARNs, and session
// tags into a packed binary format that has a separate limit. The error message
// indicates by percentage how close the policies and tags are to the upper
// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
// in the IAM User Guide.
//
// * ErrCodeRegionDisabledException "RegionDisabledException"
// STS is not activated in the requested region for the account that is being
// asked to generate credentials. The account administrator must use the IAM
// console to activate STS in that region. For more information, see Activating
// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
// You could receive this error even though you meet other defined session policy
// and session tag limits. For more information, see IAM and STS Entity Character
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
// in the IAM User Guide.
//
// * ErrCodeExpiredTokenException "ExpiredTokenException"
// The web identity token that was passed is expired or is not valid. Get a
// new identity token from the identity provider and then retry the request.
// - ErrCodeRegionDisabledException "RegionDisabledException"
// STS is not activated in the requested region for the account that is being
// asked to generate credentials. The account administrator must use the IAM
// console to activate STS in that region. For more information, see Activating
// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
//
// - ErrCodeExpiredTokenException "ExpiredTokenException"
// The web identity token that was passed is expired or is not valid. Get a
// new identity token from the identity provider and then retry the request.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
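
A minimal sketch of the call, including the awserr type assertion that the comments above recommend for inspecting error codes; the role ARN, session name, and the commented-out MFA values are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // placeholder
		RoleSessionName: aws.String("example-session"),
		DurationSeconds: aws.Int64(900),
		// Optional MFA, as described above (placeholder values):
		// SerialNumber: aws.String("arn:aws:iam::123456789012:mfa/example-user"),
		// TokenCode:    aws.String("123456"),
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			log.Fatalf("%s: %s", aerr.Code(), aerr.Message())
		}
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Credentials.AccessKeyId))
}

In everyday code this flow is more commonly wrapped by the stscreds credentials provider than called directly, but the direct call mirrors the parameters discussed above.
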
@ -231,14 +231,13 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
// // Example sending a request using the AssumeRoleWithSAMLRequest method.
// req, resp := client.AssumeRoleWithSAMLRequest(params)
//
// // Example sending a request using the AssumeRoleWithSAMLRequest method.
// req, resp := client.AssumeRoleWithSAMLRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) {
@ -274,7 +273,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
// can use these temporary security credentials to sign calls to Amazon Web
// Services services.
//
// Session Duration
// # Session Duration
//
// By default, the temporary security credentials created by AssumeRoleWithSAML
// last for one hour. However, you can use the optional DurationSeconds parameter
@ -300,7 +299,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
// a role using role chaining and provide a DurationSeconds parameter value
// greater than one hour, the operation fails.
//
// Permissions
// # Permissions
//
// The temporary security credentials created by AssumeRoleWithSAML can be used
// to make API calls to any Amazon Web Services service with the following exception:
@ -331,7 +330,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
// identifiable information (PII). For example, you could instead use the persistent
// identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent).
//
// Tags
// # Tags
//
// (Optional) You can configure your IdP to pass attributes into your SAML assertion
// as session tags. Each session tag consists of a key name and an associated
@ -365,7 +364,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
// in the IAM User Guide.
//
// SAML Configuration
// # SAML Configuration
//
// Before your application can call AssumeRoleWithSAML, you must configure your
// SAML identity provider (IdP) to issue the claims required by Amazon Web Services.
@ -376,17 +375,17 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
//
// For more information, see the following resources:
//
// * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
// in the IAM User Guide.
// - About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
// in the IAM User Guide.
//
// * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
// in the IAM User Guide.
// - Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
// in the IAM User Guide.
//
// * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
// in the IAM User Guide.
// - Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
// in the IAM User Guide.
//
// * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
// in the IAM User Guide.
// - Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
// in the IAM User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@ -396,47 +395,48 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
// API operation AssumeRoleWithSAML for usage and error information.
//
// Returned Error Codes:
// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
// The request was rejected because the policy document was malformed. The error
// message describes the specific error.
//
// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
// The request was rejected because the total packed size of the session policies
// and session tags combined was too large. An Amazon Web Services conversion
// compresses the session policy document, session policy ARNs, and session
// tags into a packed binary format that has a separate limit. The error message
// indicates by percentage how close the policies and tags are to the upper
// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
// in the IAM User Guide.
// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
// The request was rejected because the policy document was malformed. The error
// message describes the specific error.
//
// You could receive this error even though you meet other defined session policy
// and session tag limits. For more information, see IAM and STS Entity Character
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
// in the IAM User Guide.
// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
// The request was rejected because the total packed size of the session policies
// and session tags combined was too large. An Amazon Web Services conversion
// compresses the session policy document, session policy ARNs, and session
// tags into a packed binary format that has a separate limit. The error message
// indicates by percentage how close the policies and tags are to the upper
// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
// in the IAM User Guide.
//
// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
// The identity provider (IdP) reported that authentication failed. This might
// be because the claim is invalid.
// You could receive this error even though you meet other defined session policy
// and session tag limits. For more information, see IAM and STS Entity Character
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
// in the IAM User Guide.
//
// If this error is returned for the AssumeRoleWithWebIdentity operation, it
// can also mean that the claim has expired or has been explicitly revoked.
// - ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
// The identity provider (IdP) reported that authentication failed. This might
// be because the claim is invalid.
//
// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
// The web identity token that was passed could not be validated by Amazon Web
// Services. Get a new identity token from the identity provider and then retry
// the request.
// If this error is returned for the AssumeRoleWithWebIdentity operation, it
// can also mean that the claim has expired or has been explicitly revoked.
//
// * ErrCodeExpiredTokenException "ExpiredTokenException"
// The web identity token that was passed is expired or is not valid. Get a
// new identity token from the identity provider and then retry the request.
// - ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
// The web identity token that was passed could not be validated by Amazon Web
// Services. Get a new identity token from the identity provider and then retry
// the request.
//
// * ErrCodeRegionDisabledException "RegionDisabledException"
// STS is not activated in the requested region for the account that is being
// asked to generate credentials. The account administrator must use the IAM
// console to activate STS in that region. For more information, see Activating
// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
// - ErrCodeExpiredTokenException "ExpiredTokenException"
// The web identity token that was passed is expired or is not valid. Get a
// new identity token from the identity provider and then retry the request.
//
// - ErrCodeRegionDisabledException "RegionDisabledException"
// STS is not activated in the requested region for the account that is being
// asked to generate credentials. The account administrator must use the IAM
// console to activate STS in that region. For more information, see Activating
// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
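
The SAML variant follows the same shape, exchanging a base64-encoded assertion from the IdP together with the role and SAML-provider ARNs; all of the values below are placeholder assumptions:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/saml-role"),        // placeholder
		PrincipalArn:    aws.String("arn:aws:iam::123456789012:saml-provider/example"), // placeholder
		SAMLAssertion:   aws.String("PLACEHOLDER-BASE64-SAML-ASSERTION"),                // from the IdP
		DurationSeconds: aws.Int64(3600),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Credentials.AccessKeyId))
}
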
@ -476,14 +476,13 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
// // Example sending a request using the AssumeRoleWithWebIdentityRequest method.
// req, resp := client.AssumeRoleWithWebIdentityRequest(params)
//
// // Example sending a request using the AssumeRoleWithWebIdentityRequest method.
// req, resp := client.AssumeRoleWithWebIdentityRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) {
@ -540,7 +539,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// temporary security credentials to sign calls to Amazon Web Services service
// API operations.
//
// Session Duration
// # Session Duration
//
// By default, the temporary security credentials created by AssumeRoleWithWebIdentity
// last for one hour. However, you can use the optional DurationSeconds parameter
@ -555,7 +554,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
// in the IAM User Guide.
//
// Permissions
// # Permissions
//
// The temporary security credentials created by AssumeRoleWithWebIdentity can
// be used to make API calls to any Amazon Web Services service with the following
@ -576,7 +575,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// in the IAM User Guide.
//
// Tags
// # Tags
//
// (Optional) You can configure your IdP to pass attributes into your web identity
// token as session tags. Each session tag consists of a key name and an associated
@ -610,7 +609,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
// in the IAM User Guide.
//
// Identities
// # Identities
//
// Before your application can call AssumeRoleWithWebIdentity, you must have
// an identity token from a supported identity provider and create a role that
@ -628,24 +627,24 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity
// API, see the following resources:
//
// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
// - Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
//
// * Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/).
// Walk through the process of authenticating through Login with Amazon,
// Facebook, or Google, getting temporary security credentials, and then
// using those credentials to make a request to Amazon Web Services.
// - Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/).
// Walk through the process of authenticating through Login with Amazon,
// Facebook, or Google, getting temporary security credentials, and then
// using those credentials to make a request to Amazon Web Services.
//
// * Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/)
// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/).
// These toolkits contain sample apps that show how to invoke the identity
// providers. The toolkits then show how to use the information from these
// providers to get and use temporary security credentials.
// - Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/)
// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/).
// These toolkits contain sample apps that show how to invoke the identity
// providers. The toolkits then show how to use the information from these
// providers to get and use temporary security credentials.
//
// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications).
// This article discusses web identity federation and shows an example of
// how to use web identity federation to get access to content in Amazon
// S3.
// - Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications).
// This article discusses web identity federation and shows an example of
// how to use web identity federation to get access to content in Amazon
// S3.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@ -655,54 +654,55 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// API operation AssumeRoleWithWebIdentity for usage and error information.
//
// Returned Error Codes:
// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
// The request was rejected because the policy document was malformed. The error
// message describes the specific error.
//
// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
// The request was rejected because the total packed size of the session policies
// and session tags combined was too large. An Amazon Web Services conversion
// compresses the session policy document, session policy ARNs, and session
// tags into a packed binary format that has a separate limit. The error message
// indicates by percentage how close the policies and tags are to the upper
// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
// in the IAM User Guide.
// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
// The request was rejected because the policy document was malformed. The error
// message describes the specific error.
//
// You could receive this error even though you meet other defined session policy
// and session tag limits. For more information, see IAM and STS Entity Character
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
// in the IAM User Guide.
// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
// The request was rejected because the total packed size of the session policies
// and session tags combined was too large. An Amazon Web Services conversion
// compresses the session policy document, session policy ARNs, and session
// tags into a packed binary format that has a separate limit. The error message
// indicates by percentage how close the policies and tags are to the upper
// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
// in the IAM User Guide.
//
// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
// The identity provider (IdP) reported that authentication failed. This might
// be because the claim is invalid.
// You could receive this error even though you meet other defined session policy
// and session tag limits. For more information, see IAM and STS Entity Character
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
// in the IAM User Guide.
//
// If this error is returned for the AssumeRoleWithWebIdentity operation, it
// can also mean that the claim has expired or has been explicitly revoked.
// - ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
// The identity provider (IdP) reported that authentication failed. This might
// be because the claim is invalid.
//
// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError"
// The request could not be fulfilled because the identity provider (IDP) that
// was asked to verify the incoming identity token could not be reached. This
// is often a transient error caused by network conditions. Retry the request
// a limited number of times so that you don't exceed the request rate. If the
// error persists, the identity provider might be down or not responding.
// If this error is returned for the AssumeRoleWithWebIdentity operation, it
// can also mean that the claim has expired or has been explicitly revoked.
//
// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
// The web identity token that was passed could not be validated by Amazon Web
// Services. Get a new identity token from the identity provider and then retry
// the request.
// - ErrCodeIDPCommunicationErrorException "IDPCommunicationError"
// The request could not be fulfilled because the identity provider (IDP) that
// was asked to verify the incoming identity token could not be reached. This
// is often a transient error caused by network conditions. Retry the request
// a limited number of times so that you don't exceed the request rate. If the
// error persists, the identity provider might be down or not responding.
//
// * ErrCodeExpiredTokenException "ExpiredTokenException"
// The web identity token that was passed is expired or is not valid. Get a
// new identity token from the identity provider and then retry the request.
// - ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
// The web identity token that was passed could not be validated by Amazon Web
// Services. Get a new identity token from the identity provider and then retry
// the request.
//
// * ErrCodeRegionDisabledException "RegionDisabledException"
// STS is not activated in the requested region for the account that is being
// asked to generate credentials. The account administrator must use the IAM
// console to activate STS in that region. For more information, see Activating
// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
// - ErrCodeExpiredTokenException "ExpiredTokenException"
// The web identity token that was passed is expired or is not valid. Get a
// new identity token from the identity provider and then retry the request.
//
// - ErrCodeRegionDisabledException "RegionDisabledException"
// STS is not activated in the requested region for the account that is being
// asked to generate credentials. The account administrator must use the IAM
// console to activate STS in that region. For more information, see Activating
// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
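
A rough sketch of the web identity exchange; because this STS operation is designed to be called by clients that hold no long-term AWS credentials, an anonymous credentials provider is used, and the region, role ARN, and identity token are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		Region:      aws.String("us-east-1"),
		Credentials: credentials.AnonymousCredentials,
	}))
	svc := sts.New(sess)

	out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-identity"), // placeholder
		RoleSessionName:  aws.String("web-identity-session"),
		WebIdentityToken: aws.String("PLACEHOLDER-OIDC-ID-TOKEN"), // token issued by the IdP
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Credentials.AccessKeyId))
}
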
@ -742,14 +742,13 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
// // Example sending a request using the DecodeAuthorizationMessageRequest method.
// req, resp := client.DecodeAuthorizationMessageRequest(params)
//
// // Example sending a request using the DecodeAuthorizationMessageRequest method.
// req, resp := client.DecodeAuthorizationMessageRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) {
@ -793,18 +792,18 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag
//
// The decoded message includes the following type of information:
//
// * Whether the request was denied due to an explicit deny or due to the
// absence of an explicit allow. For more information, see Determining Whether
// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
// in the IAM User Guide.
// - Whether the request was denied due to an explicit deny or due to the
// absence of an explicit allow. For more information, see Determining Whether
// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
// in the IAM User Guide.
//
// * The principal who made the request.
// - The principal who made the request.
//
// * The requested action.
// - The requested action.
//
// * The requested resource.
// - The requested resource.
//
// * The values of condition keys in the context of the user's request.
// - The values of condition keys in the context of the user's request.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
@ -814,10 +813,10 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag
// API operation DecodeAuthorizationMessage for usage and error information.
//
// Returned Error Codes:
// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException"
// The error returned if the message passed to DecodeAuthorizationMessage was
// invalid. This can happen if the token contains invalid characters, such as
// linebreaks.
// - ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException"
// The error returned if the message passed to DecodeAuthorizationMessage was
// invalid. This can happen if the token contains invalid characters, such as
// linebreaks.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
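
A minimal sketch of decoding one of these messages; the encoded value would normally be copied from an access-denied error returned by another service call, so the string here is a placeholder:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
		EncodedMessage: aws.String("PLACEHOLDER-ENCODED-AUTHORIZATION-MESSAGE"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// DecodedMessage is JSON text describing the denied action, principal,
	// resource, and condition keys listed above.
	fmt.Println(aws.StringValue(out.DecodedMessage))
}
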
@ -857,14 +856,13 @@ const opGetAccessKeyInfo = "GetAccessKeyInfo"
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
// // Example sending a request using the GetAccessKeyInfoRequest method.
// req, resp := client.GetAccessKeyInfoRequest(params)
//
// // Example sending a request using the GetAccessKeyInfoRequest method.
// req, resp := client.GetAccessKeyInfoRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo
func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) {
@ -954,14 +952,13 @@ const opGetCallerIdentity = "GetCallerIdentity"
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
// // Example sending a request using the GetCallerIdentityRequest method.
// req, resp := client.GetCallerIdentityRequest(params)
//
// // Example sending a request using the GetCallerIdentityRequest method.
// req, resp := client.GetCallerIdentityRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) {
@ -1037,14 +1034,13 @@ const opGetFederationToken = "GetFederationToken"
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
// // Example sending a request using the GetFederationTokenRequest method.
// req, resp := client.GetFederationTokenRequest(params)
//
// // Example sending a request using the GetFederationTokenRequest method.
// req, resp := client.GetFederationTokenRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
@ -1094,7 +1090,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
// see IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
// in the IAM User Guide.
//
// Session duration
// # Session duration
//
// The temporary credentials are valid for the specified duration, from 900
// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default
@ -1102,15 +1098,15 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
// by using the Amazon Web Services account root user credentials have a maximum
// duration of 3,600 seconds (1 hour).
//
// Permissions
// # Permissions
//
// You can use the temporary credentials created by GetFederationToken in any
// Amazon Web Services service except the following:
//
// * You cannot call any IAM operations using the CLI or the Amazon Web Services
// API.
// - You cannot call any IAM operations using the CLI or the Amazon Web Services
// API.
//
// * You cannot call any STS operations except GetCallerIdentity.
// - You cannot call any STS operations except GetCallerIdentity.
//
// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
// to this operation. You can pass a single JSON policy document to use as an
@ -1136,7 +1132,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
// by the policy. These permissions are granted in addition to the permissions
// granted by the session policies.
//
// Tags
// # Tags
//
// (Optional) You can pass tag key-value pairs to your session. These are called
// session tags. For more information about session tags, see Passing Session
@ -1172,31 +1168,32 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
// API operation GetFederationToken for usage and error information.
//
// Returned Error Codes:
// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
// The request was rejected because the policy document was malformed. The error
// message describes the specific error.
//
// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
// The request was rejected because the total packed size of the session policies
// and session tags combined was too large. An Amazon Web Services conversion
// compresses the session policy document, session policy ARNs, and session
// tags into a packed binary format that has a separate limit. The error message
// indicates by percentage how close the policies and tags are to the upper
// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
// in the IAM User Guide.
// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
// The request was rejected because the policy document was malformed. The error
// message describes the specific error.
//
// You could receive this error even though you meet other defined session policy
// and session tag limits. For more information, see IAM and STS Entity Character
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
// in the IAM User Guide.
// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
// The request was rejected because the total packed size of the session policies
// and session tags combined was too large. An Amazon Web Services conversion
// compresses the session policy document, session policy ARNs, and session
// tags into a packed binary format that has a separate limit. The error message
// indicates by percentage how close the policies and tags are to the upper
// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
// in the IAM User Guide.
//
// * ErrCodeRegionDisabledException "RegionDisabledException"
// STS is not activated in the requested region for the account that is being
// asked to generate credentials. The account administrator must use the IAM
// console to activate STS in that region. For more information, see Activating
// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
// You could receive this error even though you meet other defined session policy
// and session tag limits. For more information, see IAM and STS Entity Character
// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
// in the IAM User Guide.
//
// - ErrCodeRegionDisabledException "RegionDisabledException"
// STS is not activated in the requested region for the account that is being
// asked to generate credentials. The account administrator must use the IAM
// console to activate STS in that region. For more information, see Activating
// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) {
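
A minimal sketch of federating a user this way; the federated user name, inline session policy, and session tag are placeholder assumptions, and the inline policy further restricts what the vended credentials may do:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.Must(session.NewSession()))

	policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:ListAllMyBuckets","Resource":"*"}]}`

	out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
		Name:            aws.String("example-federated-user"), // placeholder
		DurationSeconds: aws.Int64(3600),
		Policy:          aws.String(policy),
		Tags: []*sts.Tag{
			{Key: aws.String("team"), Value: aws.String("demo")}, // placeholder session tag
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(out.Credentials.AccessKeyId))
}
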
@ -1236,14 +1233,13 @@ const opGetSessionToken = "GetSessionToken"
// This method is useful when you want to inject custom logic or configuration
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
// // Example sending a request using the GetSessionTokenRequest method.
// req, resp := client.GetSessionTokenRequest(params)
//
// // Example sending a request using the GetSessionTokenRequest method.
// req, resp := client.GetSessionTokenRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
@ -1285,7 +1281,7 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
// see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html)
// in the IAM User Guide.
//
// Session Duration
// # Session Duration
//
// The GetSessionToken operation must be called by using the long-term Amazon
// Web Services security credentials of the Amazon Web Services account root
@ -1296,15 +1292,15 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
// range from 900 seconds (15 minutes) up to 3,600 seconds (1 hour), with a
// default of 1 hour.
//
// Permissions
// # Permissions
//
// The temporary security credentials created by GetSessionToken can be used
// to make API calls to any Amazon Web Services service with the following exceptions:
//
// * You cannot call any IAM API operations unless MFA authentication information
// is included in the request.
// - You cannot call any IAM API operations unless MFA authentication information
// is included in the request.
//
// * You cannot call any STS API except AssumeRole or GetCallerIdentity.
// - You cannot call any STS API except AssumeRole or GetCallerIdentity.
//
// We recommend that you do not call GetSessionToken with Amazon Web Services
// account root user credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
@ -1330,13 +1326,13 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
// API operation GetSessionToken for usage and error information.
//
// Returned Error Codes:
// * ErrCodeRegionDisabledException "RegionDisabledException"
// STS is not activated in the requested region for the account that is being
// asked to generate credentials. The account administrator must use the IAM
// console to activate STS in that region. For more information, see Activating
// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
// - ErrCodeRegionDisabledException "RegionDisabledException"
// STS is not activated in the requested region for the account that is being
// asked to generate credentials. The account administrator must use the IAM
// console to activate STS in that region. For more information, see Activating
// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
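For orientation only (not part of this diff), a minimal caller of the GetSessionToken API documented above might look like the sketch below, using aws-sdk-go v1; the DurationSeconds value picks the lower end of the range quoted above, and error handling is reduced to log.Fatal.

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/sts"
)

func main() {
    // Create an STS client from a shared session.
    sess := session.Must(session.NewSession())
    svc := sts.New(sess)

    // Request short-lived credentials; 900 seconds is the minimum quoted above.
    out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
        DurationSeconds: aws.Int64(900),
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("temporary access key:", aws.StringValue(out.Credentials.AccessKeyId))
}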

View file

@ -14,7 +14,7 @@
// See sts package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/
//
// Using the Client
// # Using the Client
//
// To contact AWS Security Token Service with the SDK use the New function to create
// a new service client. With that client you can make API requests to the service.

View file

@ -39,13 +39,14 @@ const (
// aws.Config parameter to add your extra config.
//
// Example:
// mySession := session.Must(session.NewSession())
//
// // Create a STS client from just a session.
// svc := sts.New(mySession)
// mySession := session.Must(session.NewSession())
//
// // Create a STS client with additional configuration
// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
// // Create a STS client from just a session.
// svc := sts.New(mySession)
//
// // Create a STS client with additional configuration
// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
c := p.ClientConfig(EndpointsID, cfgs...)
if c.SigningNameDerived || len(c.SigningName) == 0 {

View file

@ -23,37 +23,37 @@ import (
// can be stubbed out for unit testing your code with the SDK without needing
// to inject custom request handlers into the SDK's request pipeline.
//
// // myFunc uses an SDK service client to make a request to
// // AWS Security Token Service.
// func myFunc(svc stsiface.STSAPI) bool {
// // Make svc.AssumeRole request
// }
// // myFunc uses an SDK service client to make a request to
// // AWS Security Token Service.
// func myFunc(svc stsiface.STSAPI) bool {
// // Make svc.AssumeRole request
// }
//
// func main() {
// sess := session.New()
// svc := sts.New(sess)
// func main() {
// sess := session.New()
// svc := sts.New(sess)
//
// myFunc(svc)
// }
// myFunc(svc)
// }
//
// In your _test.go file:
//
// // Define a mock struct to be used in your unit tests of myFunc.
// type mockSTSClient struct {
// stsiface.STSAPI
// }
// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
// // mock response/functionality
// }
// // Define a mock struct to be used in your unit tests of myFunc.
// type mockSTSClient struct {
// stsiface.STSAPI
// }
// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
// // mock response/functionality
// }
//
// func TestMyFunc(t *testing.T) {
// // Setup Test
// mockSvc := &mockSTSClient{}
// func TestMyFunc(t *testing.T) {
// // Setup Test
// mockSvc := &mockSTSClient{}
//
// myfunc(mockSvc)
// myfunc(mockSvc)
//
// // Verify myFunc's functionality
// }
// // Verify myFunc's functionality
// }
//
// It is important to note that this interface will have breaking changes
// when the service model is updated and adds new API operations, paginators,

View file

@ -275,7 +275,9 @@ func (a *App) RunContext(ctx context.Context, arguments []string) (err error) {
cCtx := NewContext(a, set, &Context{Context: ctx})
if nerr != nil {
_, _ = fmt.Fprintln(a.Writer, nerr)
_ = ShowAppHelp(cCtx)
if !a.HideHelp {
_ = ShowAppHelp(cCtx)
}
return nerr
}
cCtx.shellComplete = shellComplete
@ -296,10 +298,24 @@ func (a *App) RunContext(ctx context.Context, arguments []string) (err error) {
fmt.Fprintf(a.Writer, suggestion)
}
}
_ = ShowAppHelp(cCtx)
if !a.HideHelp {
_ = ShowAppHelp(cCtx)
}
return err
}
if a.After != nil {
defer func() {
if afterErr := a.After(cCtx); afterErr != nil {
if err != nil {
err = newMultiError(err, afterErr)
} else {
err = afterErr
}
}
}()
}
if !a.HideHelp && checkHelp(cCtx) {
_ = ShowAppHelp(cCtx)
return nil
@ -316,18 +332,6 @@ func (a *App) RunContext(ctx context.Context, arguments []string) (err error) {
return cerr
}
if a.After != nil {
defer func() {
if afterErr := a.After(cCtx); afterErr != nil {
if err != nil {
err = newMultiError(err, afterErr)
} else {
err = afterErr
}
}
}()
}
if a.Before != nil {
beforeErr := a.Before(cCtx)
if beforeErr != nil {

View file

@ -125,7 +125,9 @@ func (c *Command) Run(ctx *Context) (err error) {
fmt.Fprintf(cCtx.App.Writer, suggestion)
}
}
_ = ShowCommandHelp(cCtx, c.Name)
if !c.HideHelp {
_ = ShowCommandHelp(cCtx, c.Name)
}
return err
}
@ -135,7 +137,9 @@ func (c *Command) Run(ctx *Context) (err error) {
cerr := cCtx.checkRequiredFlags(c.Flags)
if cerr != nil {
_ = ShowCommandHelp(cCtx, c.Name)
if !c.HideHelp {
_ = ShowCommandHelp(cCtx, c.Name)
}
return cerr
}
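As a rough, hypothetical illustration of the two behaviours these hunks change (help output being suppressed when HideHelp is set, and the After hook being registered earlier in RunContext so it also runs when the required-flag check or the Before hook fails), a standalone urfave/cli/v2 program could look like this sketch; the app name and messages are made up.

package main

import (
    "fmt"
    "log"
    "os"

    "github.com/urfave/cli/v2"
)

func main() {
    app := &cli.App{
        Name:     "demo",
        HideHelp: true, // with these hunks, help is no longer printed on parse or required-flag errors
        After: func(c *cli.Context) error {
            // Deferred early in RunContext, so this also runs when Before or
            // the required-flag check returns an error.
            fmt.Fprintln(os.Stderr, "cleanup")
            return nil
        },
        Action: func(c *cli.Context) error {
            fmt.Println("hello")
            return nil
        },
    }
    if err := app.Run(os.Args); err != nil {
        log.Fatal(err)
    }
}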

View file

@ -4,6 +4,23 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [1.10.0] - 2022-08-11
### Added
- Add `atomic.Float32` type for atomic operations on `float32`.
- Add `CompareAndSwap` and `Swap` methods to `atomic.String`, `atomic.Error`,
and `atomic.Value`.
- Add generic `atomic.Pointer[T]` type for atomic operations on pointers of any
type. This is present only for Go 1.18 or higher, and is a drop-in
replacement for the standard library's `sync/atomic.Pointer` type
(see the usage sketch after this changelog excerpt).
### Changed
- Deprecate `CAS` methods on all types in favor of corresponding
`CompareAndSwap` methods.
Thanks to @eNV25 and @icpd for their contributions to this release.
[1.10.0]: https://github.com/uber-go/atomic/compare/v1.9.0...v1.10.0
## [1.9.0] - 2021-07-15
### Added
- Add `Float64.Swap` to match int atomic operations.
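A minimal usage sketch of the v1.10.0 additions listed above (atomic.Float32 and the generic atomic.Pointer[T]); illustrative only, and the Pointer example assumes Go 1.18 or newer.

package main

import (
    "fmt"

    "go.uber.org/atomic"
)

type config struct{ name string }

func main() {
    // Float32 mirrors the existing Float64 wrapper API.
    f := atomic.NewFloat32(1.5)
    f.Add(0.5)
    fmt.Println(f.Load()) // 2

    // Pointer[T] is a generic atomic pointer (backed by sync/atomic.Pointer on Go 1.19+).
    p := atomic.NewPointer(&config{name: "a"})
    old := p.Swap(&config{name: "b"})
    fmt.Println(old.name, p.Load().name) // a b
}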

11
vendor/go.uber.org/atomic/bool.go generated vendored
View file

@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
// Copyright (c) 2020-2021 Uber Technologies, Inc.
// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -55,8 +55,15 @@ func (x *Bool) Store(val bool) {
}
// CAS is an atomic compare-and-swap for bool values.
//
// Deprecated: Use CompareAndSwap.
func (x *Bool) CAS(old, new bool) (swapped bool) {
return x.v.CAS(boolToInt(old), boolToInt(new))
return x.CompareAndSwap(old, new)
}
// CompareAndSwap is an atomic compare-and-swap for bool values.
func (x *Bool) CompareAndSwap(old, new bool) (swapped bool) {
return x.v.CompareAndSwap(boolToInt(old), boolToInt(new))
}
// Swap atomically stores the given bool and returns the old

View file

@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
// Copyright (c) 2020-2021 Uber Technologies, Inc.
// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -56,8 +56,15 @@ func (x *Duration) Store(val time.Duration) {
}
// CAS is an atomic compare-and-swap for time.Duration values.
//
// Deprecated: Use CompareAndSwap.
func (x *Duration) CAS(old, new time.Duration) (swapped bool) {
return x.v.CAS(int64(old), int64(new))
return x.CompareAndSwap(old, new)
}
// CompareAndSwap is an atomic compare-and-swap for time.Duration values.
func (x *Duration) CompareAndSwap(old, new time.Duration) (swapped bool) {
return x.v.CompareAndSwap(int64(old), int64(new))
}
// Swap atomically stores the given time.Duration and returns the old

13
vendor/go.uber.org/atomic/error.go generated vendored
View file

@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
// Copyright (c) 2020-2021 Uber Technologies, Inc.
// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -49,3 +49,14 @@ func (x *Error) Load() error {
func (x *Error) Store(val error) {
x.v.Store(packError(val))
}
// CompareAndSwap is an atomic compare-and-swap for error values.
func (x *Error) CompareAndSwap(old, new error) (swapped bool) {
return x.v.CompareAndSwap(packError(old), packError(new))
}
// Swap atomically stores the given error and returns the old
// value.
func (x *Error) Swap(val error) (old error) {
return unpackError(x.v.Swap(packError(val)))
}

View file

@ -1,4 +1,4 @@
// Copyright (c) 2020 Uber Technologies, Inc.
// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -23,7 +23,7 @@ package atomic
// atomic.Value panics on nil inputs, or if the underlying type changes.
// Stabilize by always storing a custom struct that we control.
//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go
//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -compareandswap -swap -file=error.go
type packedError struct{ Value error }

77
vendor/go.uber.org/atomic/float32.go generated vendored Normal file
View file

@ -0,0 +1,77 @@
// @generated Code generated by gen-atomicwrapper.
// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package atomic
import (
"encoding/json"
"math"
)
// Float32 is an atomic type-safe wrapper for float32 values.
type Float32 struct {
_ nocmp // disallow non-atomic comparison
v Uint32
}
var _zeroFloat32 float32
// NewFloat32 creates a new Float32.
func NewFloat32(val float32) *Float32 {
x := &Float32{}
if val != _zeroFloat32 {
x.Store(val)
}
return x
}
// Load atomically loads the wrapped float32.
func (x *Float32) Load() float32 {
return math.Float32frombits(x.v.Load())
}
// Store atomically stores the passed float32.
func (x *Float32) Store(val float32) {
x.v.Store(math.Float32bits(val))
}
// Swap atomically stores the given float32 and returns the old
// value.
func (x *Float32) Swap(val float32) (old float32) {
return math.Float32frombits(x.v.Swap(math.Float32bits(val)))
}
// MarshalJSON encodes the wrapped float32 into JSON.
func (x *Float32) MarshalJSON() ([]byte, error) {
return json.Marshal(x.Load())
}
// UnmarshalJSON decodes a float32 from JSON.
func (x *Float32) UnmarshalJSON(b []byte) error {
var v float32
if err := json.Unmarshal(b, &v); err != nil {
return err
}
x.Store(v)
return nil
}

76
vendor/go.uber.org/atomic/float32_ext.go generated vendored Normal file
View file

@ -0,0 +1,76 @@
// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package atomic
import (
"math"
"strconv"
)
//go:generate bin/gen-atomicwrapper -name=Float32 -type=float32 -wrapped=Uint32 -pack=math.Float32bits -unpack=math.Float32frombits -swap -json -imports math -file=float32.go
// Add atomically adds to the wrapped float32 and returns the new value.
func (f *Float32) Add(delta float32) float32 {
for {
old := f.Load()
new := old + delta
if f.CAS(old, new) {
return new
}
}
}
// Sub atomically subtracts from the wrapped float32 and returns the new value.
func (f *Float32) Sub(delta float32) float32 {
return f.Add(-delta)
}
// CAS is an atomic compare-and-swap for float32 values.
//
// Deprecated: Use CompareAndSwap
func (f *Float32) CAS(old, new float32) (swapped bool) {
return f.CompareAndSwap(old, new)
}
// CompareAndSwap is an atomic compare-and-swap for float32 values.
//
// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators
// but CompareAndSwap allows a stored NaN to compare equal to a passed in NaN.
// This keeps typical CompareAndSwap loops from blocking forever, e.g.,
//
// for {
// old := atom.Load()
// new = f(old)
// if atom.CompareAndSwap(old, new) {
// break
// }
// }
//
// If CompareAndSwap did not treat a stored NaN as matching a passed-in NaN, the above would loop forever.
func (f *Float32) CompareAndSwap(old, new float32) (swapped bool) {
return f.v.CompareAndSwap(math.Float32bits(old), math.Float32bits(new))
}
// String encodes the wrapped value as a string.
func (f *Float32) String() string {
// 'g' is the behavior for floats with %v.
return strconv.FormatFloat(float64(f.Load()), 'g', -1, 32)
}

View file

@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
// Copyright (c) 2020-2021 Uber Technologies, Inc.
// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal

View file

@ -1,4 +1,4 @@
// Copyright (c) 2020 Uber Technologies, Inc.
// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -45,21 +45,28 @@ func (f *Float64) Sub(delta float64) float64 {
// CAS is an atomic compare-and-swap for float64 values.
//
// Note: CAS handles NaN incorrectly. NaN != NaN using Go's inbuilt operators
// but CAS allows a stored NaN to compare equal to a passed in NaN.
// This avoids typical CAS loops from blocking forever, e.g.,
//
// for {
// old := atom.Load()
// new = f(old)
// if atom.CAS(old, new) {
// break
// }
// }
//
// If CAS did not match NaN to match, then the above would loop forever.
// Deprecated: Use CompareAndSwap
func (f *Float64) CAS(old, new float64) (swapped bool) {
return f.v.CAS(math.Float64bits(old), math.Float64bits(new))
return f.CompareAndSwap(old, new)
}
// CompareAndSwap is an atomic compare-and-swap for float64 values.
//
// Note: CompareAndSwap handles NaN incorrectly. NaN != NaN using Go's inbuilt operators
// but CompareAndSwap allows a stored NaN to compare equal to a passed in NaN.
// This keeps typical CompareAndSwap loops from blocking forever, e.g.,
//
// for {
// old := atom.Load()
// new = f(old)
// if atom.CompareAndSwap(old, new) {
// break
// }
// }
//
// If CompareAndSwap did not treat a stored NaN as matching a passed-in NaN, the above would loop forever.
func (f *Float64) CompareAndSwap(old, new float64) (swapped bool) {
return f.v.CompareAndSwap(math.Float64bits(old), math.Float64bits(new))
}
// String encodes the wrapped value as a string.

9
vendor/go.uber.org/atomic/int32.go generated vendored
View file

@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.
// Copyright (c) 2020-2021 Uber Technologies, Inc.
// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -66,7 +66,14 @@ func (i *Int32) Dec() int32 {
}
// CAS is an atomic compare-and-swap.
//
// Deprecated: Use CompareAndSwap.
func (i *Int32) CAS(old, new int32) (swapped bool) {
return i.CompareAndSwap(old, new)
}
// CompareAndSwap is an atomic compare-and-swap.
func (i *Int32) CompareAndSwap(old, new int32) (swapped bool) {
return atomic.CompareAndSwapInt32(&i.v, old, new)
}

9
vendor/go.uber.org/atomic/int64.go generated vendored
View file

@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.
// Copyright (c) 2020-2021 Uber Technologies, Inc.
// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -66,7 +66,14 @@ func (i *Int64) Dec() int64 {
}
// CAS is an atomic compare-and-swap.
//
// Deprecated: Use CompareAndSwap.
func (i *Int64) CAS(old, new int64) (swapped bool) {
return i.CompareAndSwap(old, new)
}
// CompareAndSwap is an atomic compare-and-swap.
func (i *Int64) CompareAndSwap(old, new int64) (swapped bool) {
return atomic.CompareAndSwapInt64(&i.v, old, new)
}

12
vendor/go.uber.org/atomic/nocmp.go generated vendored
View file

@ -23,13 +23,13 @@ package atomic
// nocmp is an uncomparable struct. Embed this inside another struct to make
// it uncomparable.
//
// type Foo struct {
// nocmp
// // ...
// }
// type Foo struct {
// nocmp
// // ...
// }
//
// This DOES NOT:
//
// - Disallow shallow copies of structs
// - Disallow comparison of pointers to uncomparable structs
// - Disallow shallow copies of structs
// - Disallow comparison of pointers to uncomparable structs
type nocmp [0]func()

60
vendor/go.uber.org/atomic/pointer_go118.go generated vendored Normal file
View file

@ -0,0 +1,60 @@
// Copyright (c) 2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:build go1.18 && !go1.19
// +build go1.18,!go1.19
package atomic
import "unsafe"
// Pointer is an atomic pointer of type *T.
type Pointer[T any] struct {
_ nocmp // disallow non-atomic comparison
p UnsafePointer
}
// NewPointer creates a new Pointer.
func NewPointer[T any](v *T) *Pointer[T] {
var p Pointer[T]
if v != nil {
p.p.Store(unsafe.Pointer(v))
}
return &p
}
// Load atomically loads the wrapped value.
func (p *Pointer[T]) Load() *T {
return (*T)(p.p.Load())
}
// Store atomically stores the passed value.
func (p *Pointer[T]) Store(val *T) {
p.p.Store(unsafe.Pointer(val))
}
// Swap atomically swaps the wrapped pointer and returns the old value.
func (p *Pointer[T]) Swap(val *T) (old *T) {
return (*T)(p.p.Swap(unsafe.Pointer(val)))
}
// CompareAndSwap is an atomic compare-and-swap.
func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) {
return p.p.CompareAndSwap(unsafe.Pointer(old), unsafe.Pointer(new))
}

61
vendor/go.uber.org/atomic/pointer_go119.go generated vendored Normal file
View file

@ -0,0 +1,61 @@
// Copyright (c) 2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:build go1.19
// +build go1.19
package atomic
import "sync/atomic"
// Pointer is an atomic pointer of type *T.
type Pointer[T any] struct {
_ nocmp // disallow non-atomic comparison
p atomic.Pointer[T]
}
// NewPointer creates a new Pointer.
func NewPointer[T any](v *T) *Pointer[T] {
var p Pointer[T]
if v != nil {
p.p.Store(v)
}
return &p
}
// Load atomically loads the wrapped value.
func (p *Pointer[T]) Load() *T {
return p.p.Load()
}
// Store atomically stores the passed value.
func (p *Pointer[T]) Store(val *T) {
p.p.Store(val)
}
// Swap atomically swaps the wrapped pointer and returns the old value.
func (p *Pointer[T]) Swap(val *T) (old *T) {
return p.p.Swap(val)
}
// CompareAndSwap is an atomic compare-and-swap.
func (p *Pointer[T]) CompareAndSwap(old, new *T) (swapped bool) {
return p.p.CompareAndSwap(old, new)
}

13
vendor/go.uber.org/atomic/string.go generated vendored
View file

@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
// Copyright (c) 2020-2021 Uber Technologies, Inc.
// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -52,3 +52,14 @@ func (x *String) Load() string {
func (x *String) Store(val string) {
x.v.Store(val)
}
// CompareAndSwap is an atomic compare-and-swap for string values.
func (x *String) CompareAndSwap(old, new string) (swapped bool) {
return x.v.CompareAndSwap(old, new)
}
// Swap atomically stores the given string and returns the old
// value.
func (x *String) Swap(val string) (old string) {
return x.v.Swap(val).(string)
}
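For illustration only (not part of the vendored code), the Swap and CompareAndSwap methods added above could be exercised like this:

package main

import (
    "fmt"

    "go.uber.org/atomic"
)

func main() {
    var s atomic.String
    s.Store("a")
    old := s.Swap("b")                    // old == "a"
    swapped := s.CompareAndSwap("b", "c") // swapped == true; the value is now "c"
    fmt.Println(old, swapped, s.Load())
}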

View file

@ -1,4 +1,4 @@
// Copyright (c) 2020 Uber Technologies, Inc.
// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -20,9 +20,7 @@
package atomic
//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go
// Note: No Swap as String wraps Value, which wraps the stdlib sync/atomic.Value which
// only supports Swap as of go1.17: https://github.com/golang/go/issues/39351
//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -compareandswap -swap -file=string.go
// String returns the wrapped value.
func (s *String) String() string {

2
vendor/go.uber.org/atomic/time.go generated vendored
View file

@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicwrapper.
// Copyright (c) 2020-2021 Uber Technologies, Inc.
// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal

View file

@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.
// Copyright (c) 2020-2021 Uber Technologies, Inc.
// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -66,7 +66,14 @@ func (i *Uint32) Dec() uint32 {
}
// CAS is an atomic compare-and-swap.
//
// Deprecated: Use CompareAndSwap.
func (i *Uint32) CAS(old, new uint32) (swapped bool) {
return i.CompareAndSwap(old, new)
}
// CompareAndSwap is an atomic compare-and-swap.
func (i *Uint32) CompareAndSwap(old, new uint32) (swapped bool) {
return atomic.CompareAndSwapUint32(&i.v, old, new)
}

View file

@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.
// Copyright (c) 2020-2021 Uber Technologies, Inc.
// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -66,7 +66,14 @@ func (i *Uint64) Dec() uint64 {
}
// CAS is an atomic compare-and-swap.
//
// Deprecated: Use CompareAndSwap.
func (i *Uint64) CAS(old, new uint64) (swapped bool) {
return i.CompareAndSwap(old, new)
}
// CompareAndSwap is an atomic compare-and-swap.
func (i *Uint64) CompareAndSwap(old, new uint64) (swapped bool) {
return atomic.CompareAndSwapUint64(&i.v, old, new)
}

View file

@ -1,6 +1,6 @@
// @generated Code generated by gen-atomicint.
// Copyright (c) 2020-2021 Uber Technologies, Inc.
// Copyright (c) 2020-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -66,7 +66,14 @@ func (i *Uintptr) Dec() uintptr {
}
// CAS is an atomic compare-and-swap.
//
// Deprecated: Use CompareAndSwap.
func (i *Uintptr) CAS(old, new uintptr) (swapped bool) {
return i.CompareAndSwap(old, new)
}
// CompareAndSwap is an atomic compare-and-swap.
func (i *Uintptr) CompareAndSwap(old, new uintptr) (swapped bool) {
return atomic.CompareAndSwapUintptr(&i.v, old, new)
}

View file

@ -1,4 +1,4 @@
// Copyright (c) 2021 Uber Technologies, Inc.
// Copyright (c) 2021-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@ -53,6 +53,13 @@ func (p *UnsafePointer) Swap(val unsafe.Pointer) (old unsafe.Pointer) {
}
// CAS is an atomic compare-and-swap.
//
// Deprecated: Use CompareAndSwap
func (p *UnsafePointer) CAS(old, new unsafe.Pointer) (swapped bool) {
return p.CompareAndSwap(old, new)
}
// CompareAndSwap is an atomic compare-and-swap.
func (p *UnsafePointer) CompareAndSwap(old, new unsafe.Pointer) (swapped bool) {
return atomic.CompareAndSwapPointer(&p.v, old, new)
}

4
vendor/go.uber.org/atomic/value.go generated vendored
View file

@ -25,7 +25,7 @@ import "sync/atomic"
// Value shadows the type of the same name from sync/atomic
// https://godoc.org/sync/atomic#Value
type Value struct {
atomic.Value
_ nocmp // disallow non-atomic comparison
atomic.Value
}

View file

@ -1747,6 +1747,12 @@ func (sc *serverConn) processData(f *DataFrame) error {
// Sender sending more than they'd declared?
if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
if sc.inflow.available() < int32(f.Length) {
return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
}
sc.inflow.take(int32(f.Length))
sc.sendWindowUpdate(nil, int(f.Length)) // conn-level
st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
// RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
// value of a content-length header field does not equal the sum of the
@ -2223,6 +2229,9 @@ func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler
didPanic := true
defer func() {
rw.rws.stream.cancelCtx()
if req.MultipartForm != nil {
req.MultipartForm.RemoveAll()
}
if didPanic {
e := recover()
sc.writeFrameFromHandler(FrameWriteRequest{

View file

@ -67,13 +67,23 @@ const (
// A Transport internally caches connections to servers. It is safe
// for concurrent use by multiple goroutines.
type Transport struct {
// DialTLS specifies an optional dial function for creating
// TLS connections for requests.
// DialTLSContext specifies an optional dial function with context for
// creating TLS connections for requests.
//
// If DialTLS is nil, tls.Dial is used.
// If both DialTLSContext and DialTLS are nil, tls.Dial is used.
//
// If the returned net.Conn has a ConnectionState method like tls.Conn,
// it will be used to set http.Response.TLS.
DialTLSContext func(ctx context.Context, network, addr string, cfg *tls.Config) (net.Conn, error)
// DialTLS specifies an optional dial function for creating
// TLS connections for requests.
//
// If both DialTLSContext and DialTLS are nil, tls.Dial is used.
//
// Deprecated: Use DialTLSContext instead, which allows the transport
// to cancel dials as soon as they are no longer needed.
// If both are set, DialTLSContext takes priority.
DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error)
// TLSClientConfig specifies the TLS configuration to use with
@ -592,7 +602,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b
if err != nil {
return nil, err
}
tconn, err := t.dialTLS(ctx)("tcp", addr, t.newTLSConfig(host))
tconn, err := t.dialTLS(ctx, "tcp", addr, t.newTLSConfig(host))
if err != nil {
return nil, err
}
@ -613,24 +623,25 @@ func (t *Transport) newTLSConfig(host string) *tls.Config {
return cfg
}
func (t *Transport) dialTLS(ctx context.Context) func(string, string, *tls.Config) (net.Conn, error) {
if t.DialTLS != nil {
return t.DialTLS
func (t *Transport) dialTLS(ctx context.Context, network, addr string, tlsCfg *tls.Config) (net.Conn, error) {
if t.DialTLSContext != nil {
return t.DialTLSContext(ctx, network, addr, tlsCfg)
} else if t.DialTLS != nil {
return t.DialTLS(network, addr, tlsCfg)
}
return func(network, addr string, cfg *tls.Config) (net.Conn, error) {
tlsCn, err := t.dialTLSWithContext(ctx, network, addr, cfg)
if err != nil {
return nil, err
}
state := tlsCn.ConnectionState()
if p := state.NegotiatedProtocol; p != NextProtoTLS {
return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS)
}
if !state.NegotiatedProtocolIsMutual {
return nil, errors.New("http2: could not negotiate protocol mutually")
}
return tlsCn, nil
tlsCn, err := t.dialTLSWithContext(ctx, network, addr, tlsCfg)
if err != nil {
return nil, err
}
state := tlsCn.ConnectionState()
if p := state.NegotiatedProtocol; p != NextProtoTLS {
return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS)
}
if !state.NegotiatedProtocolIsMutual {
return nil, errors.New("http2: could not negotiate protocol mutually")
}
return tlsCn, nil
}
// disableKeepAlives reports whether connections should be closed as
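As context for the hunks above (again, not part of this diff): a client that previously set the deprecated DialTLS hook could move to the context-aware DialTLSContext roughly as sketched below; using tls.Dialer is just one way to honour the context.

package main

import (
    "context"
    "crypto/tls"
    "net"
    "net/http"

    "golang.org/x/net/http2"
)

func newClient() *http.Client {
    t := &http2.Transport{
        // DialTLSContext replaces the deprecated DialTLS and lets the transport
        // cancel dials that are no longer needed.
        DialTLSContext: func(ctx context.Context, network, addr string, cfg *tls.Config) (net.Conn, error) {
            d := &tls.Dialer{Config: cfg}
            return d.DialContext(ctx, network, addr)
        },
    }
    return &http.Client{Transport: t}
}

func main() {
    _ = newClient()
}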

View file

@ -178,7 +178,7 @@ type executableResponse struct {
Message string `json:"message,omitempty"`
}
func parseSubjectTokenFromSource(response []byte, source string, now int64) (string, error) {
func (cs executableCredentialSource) parseSubjectTokenFromSource(response []byte, source string, now int64) (string, error) {
var result executableResponse
if err := json.Unmarshal(response, &result); err != nil {
return "", jsonParsingError(source, string(response))
@ -203,7 +203,7 @@ func parseSubjectTokenFromSource(response []byte, source string, now int64) (str
return "", unsupportedVersionError(source, result.Version)
}
if result.ExpirationTime == 0 {
if result.ExpirationTime == 0 && cs.OutputFile != "" {
return "", missingFieldError(source, "expiration_time")
}
@ -211,7 +211,7 @@ func parseSubjectTokenFromSource(response []byte, source string, now int64) (str
return "", missingFieldError(source, "token_type")
}
if result.ExpirationTime < now {
if result.ExpirationTime != 0 && result.ExpirationTime < now {
return "", tokenExpiredError()
}
@ -259,7 +259,7 @@ func (cs executableCredentialSource) getTokenFromOutputFile() (token string, err
return "", nil
}
token, err = parseSubjectTokenFromSource(data, outputFileSource, cs.env.now().Unix())
token, err = cs.parseSubjectTokenFromSource(data, outputFileSource, cs.env.now().Unix())
if err != nil {
if _, ok := err.(nonCacheableError); ok {
// If the cached token is expired we need a new token,
@ -304,5 +304,5 @@ func (cs executableCredentialSource) getTokenFromExecutableCommand() (string, er
if err != nil {
return "", err
}
return parseSubjectTokenFromSource(output, executableSource, cs.env.now().Unix())
return cs.parseSubjectTokenFromSource(output, executableSource, cs.env.now().Unix())
}

View file

@ -73,12 +73,12 @@ aix_ppc64)
darwin_amd64)
mkerrors="$mkerrors -m64"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
mkasm="go run mkasm_darwin.go"
mkasm="go run mkasm.go"
;;
darwin_arm64)
mkerrors="$mkerrors -m64"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
mkasm="go run mkasm_darwin.go"
mkasm="go run mkasm.go"
;;
dragonfly_amd64)
mkerrors="$mkerrors -m64"
@ -142,17 +142,17 @@ netbsd_arm64)
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
openbsd_386)
mkasm="go run mkasm.go"
mkerrors="$mkerrors -m32"
mksyscall="go run mksyscall.go -l32 -openbsd"
mksyscall="go run mksyscall.go -l32 -openbsd -libc"
mksysctl="go run mksysctl_openbsd.go"
mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
openbsd_amd64)
mkasm="go run mkasm.go"
mkerrors="$mkerrors -m64"
mksyscall="go run mksyscall.go -openbsd"
mksyscall="go run mksyscall.go -openbsd -libc"
mksysctl="go run mksysctl_openbsd.go"
mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
openbsd_arm)
@ -165,10 +165,10 @@ openbsd_arm)
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
;;
openbsd_arm64)
mkasm="go run mkasm.go"
mkerrors="$mkerrors -m64"
mksyscall="go run mksyscall.go -openbsd"
mksyscall="go run mksyscall.go -openbsd -libc"
mksysctl="go run mksysctl_openbsd.go"
mksysnum="go run mksysnum.go 'https://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master'"
# Let the type of C char be signed for making the bare syscall
# API consistent across platforms.
mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
@ -232,5 +232,5 @@ esac
if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go | go run mkpost.go > ztypes_$GOOSARCH.go"; fi
if [ -n "$mkasm" ]; then echo "$mkasm $GOARCH"; fi
if [ -n "$mkasm" ]; then echo "$mkasm $GOOS $GOARCH"; fi
) | $run

27
vendor/golang.org/x/sys/unix/syscall_openbsd_libc.go generated vendored Normal file
View file

@ -0,0 +1,27 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build (openbsd && 386) || (openbsd && amd64) || (openbsd && arm64)
// +build openbsd,386 openbsd,amd64 openbsd,arm64
package unix
import _ "unsafe"
// Implemented in the runtime package (runtime/sys_openbsd3.go)
func syscall_syscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)
func syscall_syscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
func syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10 uintptr) (r1, r2 uintptr, err Errno)
func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno)
func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno)
//go:linkname syscall_syscall syscall.syscall
//go:linkname syscall_syscall6 syscall.syscall6
//go:linkname syscall_syscall10 syscall.syscall10
//go:linkname syscall_rawSyscall syscall.rawSyscall
//go:linkname syscall_rawSyscall6 syscall.rawSyscall6
func syscall_syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) {
return syscall_syscall10(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, 0)
}

View file

@ -1,4 +1,4 @@
// go run mkasm_darwin.go amd64
// go run mkasm.go darwin amd64
// Code generated by the command above; DO NOT EDIT.
//go:build go1.13

View file

@ -1,4 +1,4 @@
// go run mkasm_darwin.go amd64
// go run mkasm.go darwin amd64
// Code generated by the command above; DO NOT EDIT.
//go:build go1.12

View file

@ -1,4 +1,4 @@
// go run mkasm_darwin.go arm64
// go run mkasm.go darwin arm64
// Code generated by the command above; DO NOT EDIT.
//go:build go1.13

View file

@ -1,4 +1,4 @@
// go run mkasm_darwin.go arm64
// go run mkasm.go darwin arm64
// Code generated by the command above; DO NOT EDIT.
//go:build go1.12

File diff suppressed because it is too large

796
vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s generated vendored Normal file
View file

@ -0,0 +1,796 @@
// go run mkasm.go openbsd 386
// Code generated by the command above; DO NOT EDIT.
#include "textflag.h"
TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getgroups(SB)
GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $4
DATA ·libc_getgroups_trampoline_addr(SB)/4, $libc_getgroups_trampoline<>(SB)
TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setgroups(SB)
GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $4
DATA ·libc_setgroups_trampoline_addr(SB)/4, $libc_setgroups_trampoline<>(SB)
TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_wait4(SB)
GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $4
DATA ·libc_wait4_trampoline_addr(SB)/4, $libc_wait4_trampoline<>(SB)
TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_accept(SB)
GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $4
DATA ·libc_accept_trampoline_addr(SB)/4, $libc_accept_trampoline<>(SB)
TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_bind(SB)
GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $4
DATA ·libc_bind_trampoline_addr(SB)/4, $libc_bind_trampoline<>(SB)
TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_connect(SB)
GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $4
DATA ·libc_connect_trampoline_addr(SB)/4, $libc_connect_trampoline<>(SB)
TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_socket(SB)
GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $4
DATA ·libc_socket_trampoline_addr(SB)/4, $libc_socket_trampoline<>(SB)
TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsockopt(SB)
GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $4
DATA ·libc_getsockopt_trampoline_addr(SB)/4, $libc_getsockopt_trampoline<>(SB)
TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setsockopt(SB)
GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $4
DATA ·libc_setsockopt_trampoline_addr(SB)/4, $libc_setsockopt_trampoline<>(SB)
TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpeername(SB)
GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $4
DATA ·libc_getpeername_trampoline_addr(SB)/4, $libc_getpeername_trampoline<>(SB)
TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsockname(SB)
GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $4
DATA ·libc_getsockname_trampoline_addr(SB)/4, $libc_getsockname_trampoline<>(SB)
TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_shutdown(SB)
GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $4
DATA ·libc_shutdown_trampoline_addr(SB)/4, $libc_shutdown_trampoline<>(SB)
TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_socketpair(SB)
GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $4
DATA ·libc_socketpair_trampoline_addr(SB)/4, $libc_socketpair_trampoline<>(SB)
TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_recvfrom(SB)
GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $4
DATA ·libc_recvfrom_trampoline_addr(SB)/4, $libc_recvfrom_trampoline<>(SB)
TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendto(SB)
GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $4
DATA ·libc_sendto_trampoline_addr(SB)/4, $libc_sendto_trampoline<>(SB)
TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_recvmsg(SB)
GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $4
DATA ·libc_recvmsg_trampoline_addr(SB)/4, $libc_recvmsg_trampoline<>(SB)
TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendmsg(SB)
GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $4
DATA ·libc_sendmsg_trampoline_addr(SB)/4, $libc_sendmsg_trampoline<>(SB)
TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kevent(SB)
GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $4
DATA ·libc_kevent_trampoline_addr(SB)/4, $libc_kevent_trampoline<>(SB)
TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_utimes(SB)
GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $4
DATA ·libc_utimes_trampoline_addr(SB)/4, $libc_utimes_trampoline<>(SB)
TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_futimes(SB)
GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $4
DATA ·libc_futimes_trampoline_addr(SB)/4, $libc_futimes_trampoline<>(SB)
TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_poll(SB)
GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $4
DATA ·libc_poll_trampoline_addr(SB)/4, $libc_poll_trampoline<>(SB)
TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_madvise(SB)
GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $4
DATA ·libc_madvise_trampoline_addr(SB)/4, $libc_madvise_trampoline<>(SB)
TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mlock(SB)
GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $4
DATA ·libc_mlock_trampoline_addr(SB)/4, $libc_mlock_trampoline<>(SB)
TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mlockall(SB)
GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $4
DATA ·libc_mlockall_trampoline_addr(SB)/4, $libc_mlockall_trampoline<>(SB)
TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mprotect(SB)
GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $4
DATA ·libc_mprotect_trampoline_addr(SB)/4, $libc_mprotect_trampoline<>(SB)
TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_msync(SB)
GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $4
DATA ·libc_msync_trampoline_addr(SB)/4, $libc_msync_trampoline<>(SB)
TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munlock(SB)
GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $4
DATA ·libc_munlock_trampoline_addr(SB)/4, $libc_munlock_trampoline<>(SB)
TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munlockall(SB)
GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $4
DATA ·libc_munlockall_trampoline_addr(SB)/4, $libc_munlockall_trampoline<>(SB)
TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pipe2(SB)
GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $4
DATA ·libc_pipe2_trampoline_addr(SB)/4, $libc_pipe2_trampoline<>(SB)
TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getdents(SB)
GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $4
DATA ·libc_getdents_trampoline_addr(SB)/4, $libc_getdents_trampoline<>(SB)
TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getcwd(SB)
GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $4
DATA ·libc_getcwd_trampoline_addr(SB)/4, $libc_getcwd_trampoline<>(SB)
TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ioctl(SB)
GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $4
DATA ·libc_ioctl_trampoline_addr(SB)/4, $libc_ioctl_trampoline<>(SB)
TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sysctl(SB)
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4
DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB)
TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ppoll(SB)
GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4
DATA ·libc_ppoll_trampoline_addr(SB)/4, $libc_ppoll_trampoline<>(SB)
TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_access(SB)
GLOBL ·libc_access_trampoline_addr(SB), RODATA, $4
DATA ·libc_access_trampoline_addr(SB)/4, $libc_access_trampoline<>(SB)
TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_adjtime(SB)
GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $4
DATA ·libc_adjtime_trampoline_addr(SB)/4, $libc_adjtime_trampoline<>(SB)
TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chdir(SB)
GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $4
DATA ·libc_chdir_trampoline_addr(SB)/4, $libc_chdir_trampoline<>(SB)
TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chflags(SB)
GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $4
DATA ·libc_chflags_trampoline_addr(SB)/4, $libc_chflags_trampoline<>(SB)
TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chmod(SB)
GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $4
DATA ·libc_chmod_trampoline_addr(SB)/4, $libc_chmod_trampoline<>(SB)
TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chown(SB)
GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $4
DATA ·libc_chown_trampoline_addr(SB)/4, $libc_chown_trampoline<>(SB)
TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chroot(SB)
GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $4
DATA ·libc_chroot_trampoline_addr(SB)/4, $libc_chroot_trampoline<>(SB)
TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_close(SB)
GLOBL ·libc_close_trampoline_addr(SB), RODATA, $4
DATA ·libc_close_trampoline_addr(SB)/4, $libc_close_trampoline<>(SB)
TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup(SB)
GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $4
DATA ·libc_dup_trampoline_addr(SB)/4, $libc_dup_trampoline<>(SB)
TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup2(SB)
GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $4
DATA ·libc_dup2_trampoline_addr(SB)/4, $libc_dup2_trampoline<>(SB)
TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup3(SB)
GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $4
DATA ·libc_dup3_trampoline_addr(SB)/4, $libc_dup3_trampoline<>(SB)
TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_exit(SB)
GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $4
DATA ·libc_exit_trampoline_addr(SB)/4, $libc_exit_trampoline<>(SB)
TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_faccessat(SB)
GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $4
DATA ·libc_faccessat_trampoline_addr(SB)/4, $libc_faccessat_trampoline<>(SB)
TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchdir(SB)
GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $4
DATA ·libc_fchdir_trampoline_addr(SB)/4, $libc_fchdir_trampoline<>(SB)
TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchflags(SB)
GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $4
DATA ·libc_fchflags_trampoline_addr(SB)/4, $libc_fchflags_trampoline<>(SB)
TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchmod(SB)
GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $4
DATA ·libc_fchmod_trampoline_addr(SB)/4, $libc_fchmod_trampoline<>(SB)
TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchmodat(SB)
GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $4
DATA ·libc_fchmodat_trampoline_addr(SB)/4, $libc_fchmodat_trampoline<>(SB)
TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchown(SB)
GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $4
DATA ·libc_fchown_trampoline_addr(SB)/4, $libc_fchown_trampoline<>(SB)
TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchownat(SB)
GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $4
DATA ·libc_fchownat_trampoline_addr(SB)/4, $libc_fchownat_trampoline<>(SB)
TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_flock(SB)
GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $4
DATA ·libc_flock_trampoline_addr(SB)/4, $libc_flock_trampoline<>(SB)
TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fpathconf(SB)
GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $4
DATA ·libc_fpathconf_trampoline_addr(SB)/4, $libc_fpathconf_trampoline<>(SB)
TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstat(SB)
GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $4
DATA ·libc_fstat_trampoline_addr(SB)/4, $libc_fstat_trampoline<>(SB)
TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstatat(SB)
GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $4
DATA ·libc_fstatat_trampoline_addr(SB)/4, $libc_fstatat_trampoline<>(SB)
TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstatfs(SB)
GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $4
DATA ·libc_fstatfs_trampoline_addr(SB)/4, $libc_fstatfs_trampoline<>(SB)
TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fsync(SB)
GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $4
DATA ·libc_fsync_trampoline_addr(SB)/4, $libc_fsync_trampoline<>(SB)
TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ftruncate(SB)
GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $4
DATA ·libc_ftruncate_trampoline_addr(SB)/4, $libc_ftruncate_trampoline<>(SB)
TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getegid(SB)
GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $4
DATA ·libc_getegid_trampoline_addr(SB)/4, $libc_getegid_trampoline<>(SB)
TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_geteuid(SB)
GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $4
DATA ·libc_geteuid_trampoline_addr(SB)/4, $libc_geteuid_trampoline<>(SB)
TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getgid(SB)
GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $4
DATA ·libc_getgid_trampoline_addr(SB)/4, $libc_getgid_trampoline<>(SB)
TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpgid(SB)
GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $4
DATA ·libc_getpgid_trampoline_addr(SB)/4, $libc_getpgid_trampoline<>(SB)
TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpgrp(SB)
GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $4
DATA ·libc_getpgrp_trampoline_addr(SB)/4, $libc_getpgrp_trampoline<>(SB)
TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpid(SB)
GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $4
DATA ·libc_getpid_trampoline_addr(SB)/4, $libc_getpid_trampoline<>(SB)
TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getppid(SB)
GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $4
DATA ·libc_getppid_trampoline_addr(SB)/4, $libc_getppid_trampoline<>(SB)
TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpriority(SB)
GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $4
DATA ·libc_getpriority_trampoline_addr(SB)/4, $libc_getpriority_trampoline<>(SB)
TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrlimit(SB)
GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $4
DATA ·libc_getrlimit_trampoline_addr(SB)/4, $libc_getrlimit_trampoline<>(SB)
TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrtable(SB)
GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $4
DATA ·libc_getrtable_trampoline_addr(SB)/4, $libc_getrtable_trampoline<>(SB)
TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrusage(SB)
GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $4
DATA ·libc_getrusage_trampoline_addr(SB)/4, $libc_getrusage_trampoline<>(SB)
TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsid(SB)
GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $4
DATA ·libc_getsid_trampoline_addr(SB)/4, $libc_getsid_trampoline<>(SB)
TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_gettimeofday(SB)
GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $4
DATA ·libc_gettimeofday_trampoline_addr(SB)/4, $libc_gettimeofday_trampoline<>(SB)
TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getuid(SB)
GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $4
DATA ·libc_getuid_trampoline_addr(SB)/4, $libc_getuid_trampoline<>(SB)
TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_issetugid(SB)
GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $4
DATA ·libc_issetugid_trampoline_addr(SB)/4, $libc_issetugid_trampoline<>(SB)
TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kill(SB)
GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $4
DATA ·libc_kill_trampoline_addr(SB)/4, $libc_kill_trampoline<>(SB)
TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kqueue(SB)
GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $4
DATA ·libc_kqueue_trampoline_addr(SB)/4, $libc_kqueue_trampoline<>(SB)
TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lchown(SB)
GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $4
DATA ·libc_lchown_trampoline_addr(SB)/4, $libc_lchown_trampoline<>(SB)
TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_link(SB)
GLOBL ·libc_link_trampoline_addr(SB), RODATA, $4
DATA ·libc_link_trampoline_addr(SB)/4, $libc_link_trampoline<>(SB)
TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_linkat(SB)
GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $4
DATA ·libc_linkat_trampoline_addr(SB)/4, $libc_linkat_trampoline<>(SB)
TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_listen(SB)
GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $4
DATA ·libc_listen_trampoline_addr(SB)/4, $libc_listen_trampoline<>(SB)
TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lstat(SB)
GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $4
DATA ·libc_lstat_trampoline_addr(SB)/4, $libc_lstat_trampoline<>(SB)
TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkdir(SB)
GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $4
DATA ·libc_mkdir_trampoline_addr(SB)/4, $libc_mkdir_trampoline<>(SB)
TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkdirat(SB)
GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $4
DATA ·libc_mkdirat_trampoline_addr(SB)/4, $libc_mkdirat_trampoline<>(SB)
TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkfifo(SB)
GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $4
DATA ·libc_mkfifo_trampoline_addr(SB)/4, $libc_mkfifo_trampoline<>(SB)
TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkfifoat(SB)
GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $4
DATA ·libc_mkfifoat_trampoline_addr(SB)/4, $libc_mkfifoat_trampoline<>(SB)
TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mknod(SB)
GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $4
DATA ·libc_mknod_trampoline_addr(SB)/4, $libc_mknod_trampoline<>(SB)
TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mknodat(SB)
GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $4
DATA ·libc_mknodat_trampoline_addr(SB)/4, $libc_mknodat_trampoline<>(SB)
TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_nanosleep(SB)
GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $4
DATA ·libc_nanosleep_trampoline_addr(SB)/4, $libc_nanosleep_trampoline<>(SB)
TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_open(SB)
GLOBL ·libc_open_trampoline_addr(SB), RODATA, $4
DATA ·libc_open_trampoline_addr(SB)/4, $libc_open_trampoline<>(SB)
TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_openat(SB)
GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $4
DATA ·libc_openat_trampoline_addr(SB)/4, $libc_openat_trampoline<>(SB)
TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pathconf(SB)
GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $4
DATA ·libc_pathconf_trampoline_addr(SB)/4, $libc_pathconf_trampoline<>(SB)
TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pread(SB)
GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $4
DATA ·libc_pread_trampoline_addr(SB)/4, $libc_pread_trampoline<>(SB)
TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pwrite(SB)
GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $4
DATA ·libc_pwrite_trampoline_addr(SB)/4, $libc_pwrite_trampoline<>(SB)
TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_read(SB)
GLOBL ·libc_read_trampoline_addr(SB), RODATA, $4
DATA ·libc_read_trampoline_addr(SB)/4, $libc_read_trampoline<>(SB)
TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_readlink(SB)
GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $4
DATA ·libc_readlink_trampoline_addr(SB)/4, $libc_readlink_trampoline<>(SB)
TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_readlinkat(SB)
GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $4
DATA ·libc_readlinkat_trampoline_addr(SB)/4, $libc_readlinkat_trampoline<>(SB)
TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_rename(SB)
GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $4
DATA ·libc_rename_trampoline_addr(SB)/4, $libc_rename_trampoline<>(SB)
TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_renameat(SB)
GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $4
DATA ·libc_renameat_trampoline_addr(SB)/4, $libc_renameat_trampoline<>(SB)
TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_revoke(SB)
GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $4
DATA ·libc_revoke_trampoline_addr(SB)/4, $libc_revoke_trampoline<>(SB)
TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_rmdir(SB)
GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $4
DATA ·libc_rmdir_trampoline_addr(SB)/4, $libc_rmdir_trampoline<>(SB)
TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lseek(SB)
GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $4
DATA ·libc_lseek_trampoline_addr(SB)/4, $libc_lseek_trampoline<>(SB)
TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_select(SB)
GLOBL ·libc_select_trampoline_addr(SB), RODATA, $4
DATA ·libc_select_trampoline_addr(SB)/4, $libc_select_trampoline<>(SB)
TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setegid(SB)
GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setegid_trampoline_addr(SB)/4, $libc_setegid_trampoline<>(SB)
TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_seteuid(SB)
GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $4
DATA ·libc_seteuid_trampoline_addr(SB)/4, $libc_seteuid_trampoline<>(SB)
TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setgid(SB)
GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setgid_trampoline_addr(SB)/4, $libc_setgid_trampoline<>(SB)
TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setlogin(SB)
GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $4
DATA ·libc_setlogin_trampoline_addr(SB)/4, $libc_setlogin_trampoline<>(SB)
TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setpgid(SB)
GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setpgid_trampoline_addr(SB)/4, $libc_setpgid_trampoline<>(SB)
TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setpriority(SB)
GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $4
DATA ·libc_setpriority_trampoline_addr(SB)/4, $libc_setpriority_trampoline<>(SB)
TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setregid(SB)
GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setregid_trampoline_addr(SB)/4, $libc_setregid_trampoline<>(SB)
TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setreuid(SB)
GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setreuid_trampoline_addr(SB)/4, $libc_setreuid_trampoline<>(SB)
TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setresgid(SB)
GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setresgid_trampoline_addr(SB)/4, $libc_setresgid_trampoline<>(SB)
TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setresuid(SB)
GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setresuid_trampoline_addr(SB)/4, $libc_setresuid_trampoline<>(SB)
TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setrlimit(SB)
GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $4
DATA ·libc_setrlimit_trampoline_addr(SB)/4, $libc_setrlimit_trampoline<>(SB)
TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setrtable(SB)
GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $4
DATA ·libc_setrtable_trampoline_addr(SB)/4, $libc_setrtable_trampoline<>(SB)
TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setsid(SB)
GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setsid_trampoline_addr(SB)/4, $libc_setsid_trampoline<>(SB)
TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_settimeofday(SB)
GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $4
DATA ·libc_settimeofday_trampoline_addr(SB)/4, $libc_settimeofday_trampoline<>(SB)
TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setuid(SB)
GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $4
DATA ·libc_setuid_trampoline_addr(SB)/4, $libc_setuid_trampoline<>(SB)
TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_stat(SB)
GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $4
DATA ·libc_stat_trampoline_addr(SB)/4, $libc_stat_trampoline<>(SB)
TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_statfs(SB)
GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $4
DATA ·libc_statfs_trampoline_addr(SB)/4, $libc_statfs_trampoline<>(SB)
TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_symlink(SB)
GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $4
DATA ·libc_symlink_trampoline_addr(SB)/4, $libc_symlink_trampoline<>(SB)
TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_symlinkat(SB)
GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $4
DATA ·libc_symlinkat_trampoline_addr(SB)/4, $libc_symlinkat_trampoline<>(SB)
TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sync(SB)
GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $4
DATA ·libc_sync_trampoline_addr(SB)/4, $libc_sync_trampoline<>(SB)
TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_truncate(SB)
GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $4
DATA ·libc_truncate_trampoline_addr(SB)/4, $libc_truncate_trampoline<>(SB)
TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_umask(SB)
GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $4
DATA ·libc_umask_trampoline_addr(SB)/4, $libc_umask_trampoline<>(SB)
TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unlink(SB)
GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $4
DATA ·libc_unlink_trampoline_addr(SB)/4, $libc_unlink_trampoline<>(SB)
TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unlinkat(SB)
GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $4
DATA ·libc_unlinkat_trampoline_addr(SB)/4, $libc_unlinkat_trampoline<>(SB)
TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unmount(SB)
GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $4
DATA ·libc_unmount_trampoline_addr(SB)/4, $libc_unmount_trampoline<>(SB)
TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_write(SB)
GLOBL ·libc_write_trampoline_addr(SB), RODATA, $4
DATA ·libc_write_trampoline_addr(SB)/4, $libc_write_trampoline<>(SB)
TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mmap(SB)
GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $4
DATA ·libc_mmap_trampoline_addr(SB)/4, $libc_mmap_trampoline<>(SB)
TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munmap(SB)
GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $4
DATA ·libc_munmap_trampoline_addr(SB)/4, $libc_munmap_trampoline<>(SB)
TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_utimensat(SB)
GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $4
DATA ·libc_utimensat_trampoline_addr(SB)/4, $libc_utimensat_trampoline<>(SB)


796
vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s generated vendored Normal file

@@ -0,0 +1,796 @@
// go run mkasm.go openbsd amd64
// Code generated by the command above; DO NOT EDIT.
#include "textflag.h"
TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getgroups(SB)
GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8
DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB)
TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setgroups(SB)
GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8
DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB)
TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_wait4(SB)
GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8
DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB)
TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_accept(SB)
GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8
DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB)
TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_bind(SB)
GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8
DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB)
TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_connect(SB)
GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8
DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB)
TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_socket(SB)
GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8
DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB)
TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsockopt(SB)
GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8
DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB)
TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setsockopt(SB)
GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8
DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB)
TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpeername(SB)
GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB)
TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsockname(SB)
GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8
DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB)
TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_shutdown(SB)
GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8
DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB)
TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_socketpair(SB)
GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8
DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB)
TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_recvfrom(SB)
GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8
DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB)
TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendto(SB)
GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8
DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB)
TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_recvmsg(SB)
GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8
DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB)
TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendmsg(SB)
GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8
DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB)
TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kevent(SB)
GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8
DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB)
TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_utimes(SB)
GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8
DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB)
TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_futimes(SB)
GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8
DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB)
TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_poll(SB)
GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8
DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB)
TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_madvise(SB)
GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8
DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB)
TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mlock(SB)
GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8
DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB)
TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mlockall(SB)
GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8
DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB)
TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mprotect(SB)
GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8
DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB)
TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_msync(SB)
GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8
DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB)
TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munlock(SB)
GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8
DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB)
TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munlockall(SB)
GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8
DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB)
TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pipe2(SB)
GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8
DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB)
TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getdents(SB)
GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8
DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB)
TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getcwd(SB)
GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8
DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB)
TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ioctl(SB)
GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB)
TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sysctl(SB)
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ppoll(SB)
GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8
DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB)
TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_access(SB)
GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8
DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB)
TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_adjtime(SB)
GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8
DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB)
TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chdir(SB)
GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8
DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB)
TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chflags(SB)
GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8
DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB)
TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chmod(SB)
GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8
DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB)
TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chown(SB)
GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8
DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB)
TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chroot(SB)
GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8
DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB)
TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_close(SB)
GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8
DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB)
TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup(SB)
GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8
DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB)
TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup2(SB)
GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8
DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB)
TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup3(SB)
GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8
DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB)
TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_exit(SB)
GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8
DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB)
TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_faccessat(SB)
GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8
DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB)
TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchdir(SB)
GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB)
TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchflags(SB)
GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB)
TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchmod(SB)
GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB)
TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchmodat(SB)
GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB)
TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchown(SB)
GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB)
TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchownat(SB)
GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB)
TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_flock(SB)
GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8
DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB)
TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fpathconf(SB)
GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8
DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB)
TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstat(SB)
GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8
DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB)
TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstatat(SB)
GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8
DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB)
TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstatfs(SB)
GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8
DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB)
TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fsync(SB)
GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8
DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB)
TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ftruncate(SB)
GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8
DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB)
TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getegid(SB)
GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB)
TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_geteuid(SB)
GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB)
TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getgid(SB)
GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB)
TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpgid(SB)
GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB)
TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpgrp(SB)
GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB)
TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpid(SB)
GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB)
TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getppid(SB)
GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB)
TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpriority(SB)
GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB)
TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrlimit(SB)
GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8
DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB)
TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrtable(SB)
GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8
DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB)
TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrusage(SB)
GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8
DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB)
TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsid(SB)
GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB)
TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_gettimeofday(SB)
GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8
DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB)
TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getuid(SB)
GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB)
TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_issetugid(SB)
GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8
DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB)
TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kill(SB)
GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8
DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB)
TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kqueue(SB)
GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8
DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB)
TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lchown(SB)
GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8
DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB)
TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_link(SB)
GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8
DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB)
TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_linkat(SB)
GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8
DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB)
TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_listen(SB)
GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8
DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB)
TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lstat(SB)
GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8
DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB)
TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkdir(SB)
GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8
DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB)
TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkdirat(SB)
GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8
DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB)
TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkfifo(SB)
GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8
DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB)
TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkfifoat(SB)
GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8
DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB)
TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mknod(SB)
GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8
DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB)
TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mknodat(SB)
GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8
DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB)
TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_nanosleep(SB)
GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8
DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB)
TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_open(SB)
GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8
DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB)
TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_openat(SB)
GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8
DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB)
TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pathconf(SB)
GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8
DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB)
TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pread(SB)
GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8
DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB)
TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pwrite(SB)
GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8
DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB)
TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_read(SB)
GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8
DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB)
TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_readlink(SB)
GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8
DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB)
TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_readlinkat(SB)
GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8
DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB)
TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_rename(SB)
GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8
DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB)
TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_renameat(SB)
GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8
DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB)
TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_revoke(SB)
GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8
DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB)
TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_rmdir(SB)
GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8
DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB)
TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lseek(SB)
GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8
DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB)
TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_select(SB)
GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8
DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB)
TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setegid(SB)
GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB)
TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_seteuid(SB)
GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB)
TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setgid(SB)
GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB)
TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setlogin(SB)
GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8
DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB)
TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setpgid(SB)
GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB)
TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setpriority(SB)
GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8
DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB)
TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setregid(SB)
GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB)
TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setreuid(SB)
GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB)
TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setresgid(SB)
GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB)
TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setresuid(SB)
GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB)
TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setrlimit(SB)
GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8
DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB)
TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setrtable(SB)
GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8
DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB)
TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setsid(SB)
GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB)
TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_settimeofday(SB)
GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8
DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB)
TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setuid(SB)
GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB)
TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_stat(SB)
GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8
DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB)
TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_statfs(SB)
GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8
DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB)
TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_symlink(SB)
GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8
DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB)
TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_symlinkat(SB)
GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8
DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB)
TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sync(SB)
GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8
DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB)
TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_truncate(SB)
GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8
DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB)
TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_umask(SB)
GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8
DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB)
TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unlink(SB)
GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8
DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB)
TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unlinkat(SB)
GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8
DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB)
TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unmount(SB)
GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8
DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB)
TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_write(SB)
GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8
DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB)
TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mmap(SB)
GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8
DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB)
TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munmap(SB)
GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8
DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB)
TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_utimensat(SB)
GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8
DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB)


796
vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s generated vendored Normal file

@@ -0,0 +1,796 @@
// go run mkasm.go openbsd arm64
// Code generated by the command above; DO NOT EDIT.
#include "textflag.h"
TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getgroups(SB)
GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8
DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB)
TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setgroups(SB)
GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8
DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB)
TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_wait4(SB)
GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8
DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB)
TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_accept(SB)
GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8
DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB)
TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_bind(SB)
GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8
DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB)
TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_connect(SB)
GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8
DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB)
TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_socket(SB)
GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8
DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB)
TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsockopt(SB)
GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8
DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB)
TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setsockopt(SB)
GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8
DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB)
TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpeername(SB)
GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB)
TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsockname(SB)
GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8
DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB)
TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_shutdown(SB)
GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8
DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB)
TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_socketpair(SB)
GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8
DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB)
TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_recvfrom(SB)
GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8
DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB)
TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendto(SB)
GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8
DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB)
TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_recvmsg(SB)
GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8
DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB)
TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendmsg(SB)
GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8
DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB)
TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kevent(SB)
GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8
DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB)
TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_utimes(SB)
GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8
DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB)
TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_futimes(SB)
GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8
DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB)
TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_poll(SB)
GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8
DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB)
TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_madvise(SB)
GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8
DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB)
TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mlock(SB)
GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8
DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB)
TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mlockall(SB)
GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8
DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB)
TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mprotect(SB)
GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8
DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB)
TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_msync(SB)
GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8
DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB)
TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munlock(SB)
GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8
DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB)
TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munlockall(SB)
GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8
DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB)
TEXT libc_pipe2_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pipe2(SB)
GLOBL ·libc_pipe2_trampoline_addr(SB), RODATA, $8
DATA ·libc_pipe2_trampoline_addr(SB)/8, $libc_pipe2_trampoline<>(SB)
TEXT libc_getdents_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getdents(SB)
GLOBL ·libc_getdents_trampoline_addr(SB), RODATA, $8
DATA ·libc_getdents_trampoline_addr(SB)/8, $libc_getdents_trampoline<>(SB)
TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getcwd(SB)
GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8
DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB)
TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ioctl(SB)
GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB)
TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sysctl(SB)
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ppoll(SB)
GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8
DATA ·libc_ppoll_trampoline_addr(SB)/8, $libc_ppoll_trampoline<>(SB)
TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_access(SB)
GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8
DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB)
TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_adjtime(SB)
GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8
DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB)
TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chdir(SB)
GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8
DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB)
TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chflags(SB)
GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8
DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB)
TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chmod(SB)
GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8
DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB)
TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chown(SB)
GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8
DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB)
TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_chroot(SB)
GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8
DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB)
TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_close(SB)
GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8
DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB)
TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup(SB)
GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8
DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB)
TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup2(SB)
GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8
DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB)
TEXT libc_dup3_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_dup3(SB)
GLOBL ·libc_dup3_trampoline_addr(SB), RODATA, $8
DATA ·libc_dup3_trampoline_addr(SB)/8, $libc_dup3_trampoline<>(SB)
TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_exit(SB)
GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8
DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB)
TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_faccessat(SB)
GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8
DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB)
TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchdir(SB)
GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB)
TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchflags(SB)
GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB)
TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchmod(SB)
GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB)
TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchmodat(SB)
GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB)
TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchown(SB)
GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB)
TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fchownat(SB)
GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8
DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB)
TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_flock(SB)
GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8
DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB)
TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fpathconf(SB)
GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8
DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB)
TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstat(SB)
GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8
DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB)
TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstatat(SB)
GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8
DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB)
TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstatfs(SB)
GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8
DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB)
TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fsync(SB)
GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8
DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB)
TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_ftruncate(SB)
GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8
DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB)
TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getegid(SB)
GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB)
TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_geteuid(SB)
GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB)
TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getgid(SB)
GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB)
TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpgid(SB)
GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB)
TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpgrp(SB)
GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB)
TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpid(SB)
GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB)
TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getppid(SB)
GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB)
TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getpriority(SB)
GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8
DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB)
TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrlimit(SB)
GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8
DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB)
TEXT libc_getrtable_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrtable(SB)
GLOBL ·libc_getrtable_trampoline_addr(SB), RODATA, $8
DATA ·libc_getrtable_trampoline_addr(SB)/8, $libc_getrtable_trampoline<>(SB)
TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getrusage(SB)
GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8
DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB)
TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getsid(SB)
GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB)
TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_gettimeofday(SB)
GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8
DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB)
TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_getuid(SB)
GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB)
TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_issetugid(SB)
GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8
DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB)
TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kill(SB)
GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8
DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB)
TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_kqueue(SB)
GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8
DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB)
TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lchown(SB)
GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8
DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB)
TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_link(SB)
GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8
DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB)
TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_linkat(SB)
GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8
DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB)
TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_listen(SB)
GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8
DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB)
TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lstat(SB)
GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8
DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB)
TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkdir(SB)
GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8
DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB)
TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkdirat(SB)
GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8
DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB)
TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkfifo(SB)
GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8
DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB)
TEXT libc_mkfifoat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mkfifoat(SB)
GLOBL ·libc_mkfifoat_trampoline_addr(SB), RODATA, $8
DATA ·libc_mkfifoat_trampoline_addr(SB)/8, $libc_mkfifoat_trampoline<>(SB)
TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mknod(SB)
GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8
DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB)
TEXT libc_mknodat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mknodat(SB)
GLOBL ·libc_mknodat_trampoline_addr(SB), RODATA, $8
DATA ·libc_mknodat_trampoline_addr(SB)/8, $libc_mknodat_trampoline<>(SB)
TEXT libc_nanosleep_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_nanosleep(SB)
GLOBL ·libc_nanosleep_trampoline_addr(SB), RODATA, $8
DATA ·libc_nanosleep_trampoline_addr(SB)/8, $libc_nanosleep_trampoline<>(SB)
TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_open(SB)
GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8
DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB)
TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_openat(SB)
GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8
DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB)
TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pathconf(SB)
GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8
DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB)
TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pread(SB)
GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8
DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB)
TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_pwrite(SB)
GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8
DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB)
TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_read(SB)
GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8
DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB)
TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_readlink(SB)
GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8
DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB)
TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_readlinkat(SB)
GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8
DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB)
TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_rename(SB)
GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8
DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB)
TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_renameat(SB)
GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8
DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB)
TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_revoke(SB)
GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8
DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB)
TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_rmdir(SB)
GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8
DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB)
TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_lseek(SB)
GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8
DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB)
TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_select(SB)
GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8
DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB)
TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setegid(SB)
GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB)
TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_seteuid(SB)
GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB)
TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setgid(SB)
GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB)
TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setlogin(SB)
GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8
DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB)
TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setpgid(SB)
GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB)
TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setpriority(SB)
GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8
DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB)
TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setregid(SB)
GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB)
TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setreuid(SB)
GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB)
TEXT libc_setresgid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setresgid(SB)
GLOBL ·libc_setresgid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setresgid_trampoline_addr(SB)/8, $libc_setresgid_trampoline<>(SB)
TEXT libc_setresuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setresuid(SB)
GLOBL ·libc_setresuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setresuid_trampoline_addr(SB)/8, $libc_setresuid_trampoline<>(SB)
TEXT libc_setrlimit_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setrlimit(SB)
GLOBL ·libc_setrlimit_trampoline_addr(SB), RODATA, $8
DATA ·libc_setrlimit_trampoline_addr(SB)/8, $libc_setrlimit_trampoline<>(SB)
TEXT libc_setrtable_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setrtable(SB)
GLOBL ·libc_setrtable_trampoline_addr(SB), RODATA, $8
DATA ·libc_setrtable_trampoline_addr(SB)/8, $libc_setrtable_trampoline<>(SB)
TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setsid(SB)
GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB)
TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_settimeofday(SB)
GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8
DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB)
TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_setuid(SB)
GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8
DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB)
TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_stat(SB)
GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8
DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB)
TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_statfs(SB)
GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8
DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB)
TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_symlink(SB)
GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8
DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB)
TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_symlinkat(SB)
GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8
DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB)
TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sync(SB)
GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8
DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB)
TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_truncate(SB)
GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8
DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB)
TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_umask(SB)
GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8
DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB)
TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unlink(SB)
GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8
DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB)
TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unlinkat(SB)
GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8
DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB)
TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_unmount(SB)
GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8
DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB)
TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_write(SB)
GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8
DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB)
TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_mmap(SB)
GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8
DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB)
TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_munmap(SB)
GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8
DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB)
TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_utimensat(SB)
GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8
DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB)
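These trampolines are how golang.org/x/sys/unix reaches libc on OpenBSD: each Go wrapper loads the trampoline address from the matching ·libc_*_trampoline_addr symbol, and the trampoline simply JMPs into the dynamically imported libc function. As an illustrative sketch only (names follow the generated zsyscall_openbsd files; syscall_syscall, errnoErr and the unsafe import are provided elsewhere in the package, so this fragment is not standalone):

    //go:cgo_import_dynamic libc_read read "libc.so"

    // Filled in via the GLOBL/DATA directives above with the address of
    // libc_read_trampoline<>(SB).
    var libc_read_trampoline_addr uintptr

    func read(fd int, p []byte) (n int, err error) {
        var _p0 unsafe.Pointer
        if len(p) > 0 {
            _p0 = unsafe.Pointer(&p[0])
        }
        // syscall_syscall (runtime-backed) calls through the trampoline,
        // which JMPs into libc's read.
        r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(p)))
        n = int(r0)
        if e1 != 0 {
            err = errnoErr(e1)
        }
        return
    }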


@ -6,6 +6,7 @@
package unix
// Deprecated: Use libc wrappers instead of direct syscalls.
const (
SYS_EXIT = 1 // { void sys_exit(int rval); }
SYS_FORK = 2 // { int sys_fork(void); }


@ -6,6 +6,7 @@
package unix
// Deprecated: Use libc wrappers instead of direct syscalls.
const (
SYS_EXIT = 1 // { void sys_exit(int rval); }
SYS_FORK = 2 // { int sys_fork(void); }


@ -6,6 +6,7 @@
package unix
// Deprecated: Use libc wrappers instead of direct syscalls.
const (
SYS_EXIT = 1 // { void sys_exit(int rval); }
SYS_FORK = 2 // { int sys_fork(void); }
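The Deprecated note added to these zsysnum files points callers away from raw syscall numbers and toward the libc-backed wrappers that route through the trampolines above. A hedged sketch of the difference (unix.Exit is a real wrapper in golang.org/x/sys/unix; the commented-out line is the discouraged raw form):

    package main

    import "golang.org/x/sys/unix"

    func main() {
        // Discouraged on OpenBSD: issuing the syscall by number.
        // _, _, _ = unix.Syscall(unix.SYS_EXIT, 0, 0, 0)

        // Preferred: the libc wrapper, which goes through the trampolines
        // defined in the assembly above.
        unix.Exit(0)
    }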


@ -382,11 +382,11 @@ func ConvertVariant(v map[string]interface{}, dst interface{}) bool {
// For example, if your response has a "NextPageToken" and a slice of "Items" with "Id" fields,
// you could request just those fields like this:
//
// svc.Events.List().Fields("nextPageToken", "items/id").Do()
// svc.Events.List().Fields("nextPageToken", "items/id").Do()
//
// or if you were also interested in each Item's "Updated" field, you can combine them like this:
//
// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do()
// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do()
//
// Another way to find field names is through the Google API explorer:
// https://developers.google.com/apis-explorer/#p/


@ -7,7 +7,7 @@
//
// This package is DEPRECATED. Users should instead use,
//
// service, err := NewService(..., option.WithAPIKey(...))
// service, err := NewService(..., option.WithAPIKey(...))
package transport
import (


@ -8,31 +8,31 @@
//
// For product documentation, see: https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials
//
// Creating a client
// # Creating a client
//
// Usage example:
//
// import "google.golang.org/api/iamcredentials/v1"
// ...
// ctx := context.Background()
// iamcredentialsService, err := iamcredentials.NewService(ctx)
// import "google.golang.org/api/iamcredentials/v1"
// ...
// ctx := context.Background()
// iamcredentialsService, err := iamcredentials.NewService(ctx)
//
// In this example, Google Application Default Credentials are used for authentication.
//
// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials.
//
// Other authentication options
// # Other authentication options
//
// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey:
//
// iamcredentialsService, err := iamcredentials.NewService(ctx, option.WithAPIKey("AIza..."))
// iamcredentialsService, err := iamcredentials.NewService(ctx, option.WithAPIKey("AIza..."))
//
// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource:
//
// config := &oauth2.Config{...}
// // ...
// token, err := config.Exchange(ctx, ...)
// iamcredentialsService, err := iamcredentials.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token)))
// config := &oauth2.Config{...}
// // ...
// token, err := config.Exchange(ctx, ...)
// iamcredentialsService, err := iamcredentials.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token)))
//
// See https://godoc.org/google.golang.org/api/option/ for details on options.
package iamcredentials // import "google.golang.org/api/iamcredentials/v1"
@ -519,11 +519,11 @@ type ProjectsServiceAccountsGenerateAccessTokenCall struct {
// GenerateAccessToken: Generates an OAuth 2.0 access token for a
// service account.
//
// - name: The resource name of the service account for which the
// credentials are requested, in the following format:
// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
// wildcard character is required; replacing it with a project ID is
// invalid.
// - name: The resource name of the service account for which the
// credentials are requested, in the following format:
// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
// wildcard character is required; replacing it with a project ID is
// invalid.
func (r *ProjectsServiceAccountsService) GenerateAccessToken(name string, generateaccesstokenrequest *GenerateAccessTokenRequest) *ProjectsServiceAccountsGenerateAccessTokenCall {
c := &ProjectsServiceAccountsGenerateAccessTokenCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
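The reflowed doc comments above spell out the required projects/-/serviceAccounts/... name format. As an illustration only (the service-account email and scope are placeholders, not taken from this diff), a typical use of the generated call chain looks roughly like this; GenerateIdToken, SignBlob, and SignJwt below follow the same shape:

    package main

    import (
        "context"
        "log"

        "google.golang.org/api/iamcredentials/v1"
    )

    func main() {
        ctx := context.Background()
        svc, err := iamcredentials.NewService(ctx) // uses Application Default Credentials
        if err != nil {
            log.Fatal(err)
        }

        // The `-` wildcard is required in place of a project ID, as the doc comment notes.
        name := "projects/-/serviceAccounts/my-sa@my-project.iam.gserviceaccount.com"
        req := &iamcredentials.GenerateAccessTokenRequest{
            Scope: []string{"https://www.googleapis.com/auth/cloud-platform"},
        }
        resp, err := svc.Projects.ServiceAccounts.GenerateAccessToken(name, req).Context(ctx).Do()
        if err != nil {
            log.Fatal(err)
        }
        log.Println("token expires at:", resp.ExpireTime)
    }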
@ -666,11 +666,11 @@ type ProjectsServiceAccountsGenerateIdTokenCall struct {
// GenerateIdToken: Generates an OpenID Connect ID token for a service
// account.
//
// - name: The resource name of the service account for which the
// credentials are requested, in the following format:
// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
// wildcard character is required; replacing it with a project ID is
// invalid.
// - name: The resource name of the service account for which the
// credentials are requested, in the following format:
// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
// wildcard character is required; replacing it with a project ID is
// invalid.
func (r *ProjectsServiceAccountsService) GenerateIdToken(name string, generateidtokenrequest *GenerateIdTokenRequest) *ProjectsServiceAccountsGenerateIdTokenCall {
c := &ProjectsServiceAccountsGenerateIdTokenCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
@ -813,11 +813,11 @@ type ProjectsServiceAccountsSignBlobCall struct {
// SignBlob: Signs a blob using a service account's system-managed
// private key.
//
// - name: The resource name of the service account for which the
// credentials are requested, in the following format:
// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
// wildcard character is required; replacing it with a project ID is
// invalid.
// - name: The resource name of the service account for which the
// credentials are requested, in the following format:
// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
// wildcard character is required; replacing it with a project ID is
// invalid.
func (r *ProjectsServiceAccountsService) SignBlob(name string, signblobrequest *SignBlobRequest) *ProjectsServiceAccountsSignBlobCall {
c := &ProjectsServiceAccountsSignBlobCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name
@ -960,11 +960,11 @@ type ProjectsServiceAccountsSignJwtCall struct {
// SignJwt: Signs a JWT using a service account's system-managed private
// key.
//
// - name: The resource name of the service account for which the
// credentials are requested, in the following format:
// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
// wildcard character is required; replacing it with a project ID is
// invalid.
// - name: The resource name of the service account for which the
// credentials are requested, in the following format:
// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
// wildcard character is required; replacing it with a project ID is
// invalid.
func (r *ProjectsServiceAccountsService) SignJwt(name string, signjwtrequest *SignJwtRequest) *ProjectsServiceAccountsSignJwtCall {
c := &ProjectsServiceAccountsSignJwtCall{s: r.s, urlParams_: make(gensupport.URLParams)}
c.name = name


@ -70,11 +70,12 @@ const (
//
// - A self-signed JWT flow will be executed if the following conditions are
// met:
// (1) At least one of the following is true:
// (a) No scope is provided
// (b) Scope for self-signed JWT flow is enabled
// (c) Audiences are explicitly provided by users
// (2) No service account impersonation
//
// (1) At least one of the following is true:
// (a) No scope is provided
// (b) Scope for self-signed JWT flow is enabled
// (c) Audiences are explicitly provided by users
// (2) No service account impersonation
//
// - Otherwise, executes standard OAuth 2.0 flow
// More details: google.aip.dev/auth/4111
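A hedged illustration of the conditions listed above (the key file path and audience are placeholders, and whether a given client actually takes the self-signed JWT path also depends on the credentials supplied):

    package main

    import (
        "context"
        "log"

        "google.golang.org/api/iamcredentials/v1"
        "google.golang.org/api/option"
    )

    func main() {
        ctx := context.Background()

        // Explicit audience, no impersonation: eligible for the self-signed JWT flow.
        _, err := iamcredentials.NewService(ctx,
            option.WithCredentialsFile("sa-key.json"), // placeholder path
            option.WithAudiences("https://iamcredentials.googleapis.com/"))
        if err != nil {
            log.Fatal(err)
        }

        // Explicit scopes: typically the standard OAuth 2.0 flow, unless the
        // scoped self-signed JWT feature is enabled for the client.
        _, err = iamcredentials.NewService(ctx,
            option.WithCredentialsFile("sa-key.json"),
            option.WithScopes("https://www.googleapis.com/auth/cloud-platform"))
        if err != nil {
            log.Fatal(err)
        }
    }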


@ -13,9 +13,10 @@ import (
// MarshalJSON returns a JSON encoding of schema containing only selected fields.
// A field is selected if any of the following is true:
// * it has a non-empty value
// * its field name is present in forceSendFields and it is not a nil pointer or nil interface
// * its field name is present in nullFields.
// - it has a non-empty value
// - its field name is present in forceSendFields and it is not a nil pointer or nil interface
// - its field name is present in nullFields.
//
// The JSON key for each selected field is taken from the field's json: struct tag.
func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) {
if len(forceSendFields) == 0 && len(nullFields) == 0 {
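A hedged sketch of how generated API types feed these rules, using a hypothetical Item struct (the real generated types live inside this module, where the internal gensupport package is importable, and follow the same shape):

    // Hypothetical type mirroring the shape of generated API structs.
    type Item struct {
        Id              string   `json:"id,omitempty"`
        Count           int64    `json:"count,omitempty"`
        ForceSendFields []string `json:"-"`
        NullFields      []string `json:"-"`
    }

    // Generated types delegate to gensupport.MarshalJSON roughly like this.
    func (i Item) MarshalJSON() ([]byte, error) {
        type NoMethod Item
        return gensupport.MarshalJSON(NoMethod(i), i.ForceSendFields, i.NullFields)
    }

With Count == 0 but "Count" listed in ForceSendFields, the zero value is still emitted under its json key ("count":0); naming a field in NullFields emits an explicit null for it.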


@ -37,7 +37,7 @@ func (u URLParams) SetMulti(key string, values []string) {
u[key] = values
}
// Encode encodes the values into ``URL encoded'' form
// Encode encodes the values into “URL encoded” form
// ("bar=baz&foo=quux") sorted by key.
func (u URLParams) Encode() string {
return url.Values(u).Encode()
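A small in-package fragment showing the behaviour described in the comment, using only the methods visible in this hunk (the fmt import is assumed):

    u := make(URLParams)
    u.SetMulti("foo", []string{"quux"})
    u.SetMulti("bar", []string{"baz"})
    fmt.Println(u.Encode()) // bar=baz&foo=quux (keys are sorted by Encode)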


@ -5,4 +5,4 @@
package internal
// Version is the current tagged release of the library.
const Version = "0.91.0"
const Version = "0.92.0"


@ -287,10 +287,10 @@ func (w withClientCertSource) Apply(o *internal.DialSettings) {
// service account SA2 while using delegate service accounts DSA1 and DSA2,
// the following must be true:
//
// 1. Base service account SA1 has roles/iam.serviceAccountTokenCreator on
// DSA1.
// 2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2.
// 3. DSA2 has roles/iam.serviceAccountTokenCreator on target SA2.
// 1. Base service account SA1 has roles/iam.serviceAccountTokenCreator on
// DSA1.
// 2. DSA1 has roles/iam.serviceAccountTokenCreator on DSA2.
// 3. DSA2 has roles/iam.serviceAccountTokenCreator on target SA2.
//
// The resulting impersonated credential will either have the default scopes of
// the client being instantiated or the scopes from WithScopes if provided.
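A hedged sketch of wiring up that delegate chain with option.ImpersonateCredentials (the emails are placeholders; the assumption here is that the delegates are listed in the order of the chain described above, with SA1's own credentials coming from the environment):

    package main

    import (
        "context"
        "log"

        "google.golang.org/api/iamcredentials/v1"
        "google.golang.org/api/option"
    )

    func main() {
        ctx := context.Background()
        svc, err := iamcredentials.NewService(ctx,
            option.ImpersonateCredentials(
                "sa2@my-project.iam.gserviceaccount.com",  // target SA2
                "dsa1@my-project.iam.gserviceaccount.com", // delegate DSA1
                "dsa2@my-project.iam.gserviceaccount.com", // delegate DSA2
            ))
        if err != nil {
            log.Fatal(err)
        }
        _ = svc
    }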

Some files were not shown because too many files have changed in this diff.