vendor: make vendor-update

This commit is contained in:
Aliaksandr Valialkin 2022-10-07 01:01:21 +03:00
parent 285e92706d
commit 0cea525456
No known key found for this signature in database
GPG key ID: A72BEC6CD3D0DED1
143 changed files with 14403 additions and 13362 deletions

14
go.mod
View file

@ -4,7 +4,7 @@ go 1.19
require ( require (
cloud.google.com/go/storage v1.27.0 cloud.google.com/go/storage v1.27.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.0
github.com/VictoriaMetrics/fastcache v1.12.0 github.com/VictoriaMetrics/fastcache v1.12.0
// Do not use the original github.com/valyala/fasthttp because of issues // Do not use the original github.com/valyala/fasthttp because of issues
@ -29,9 +29,9 @@ require (
github.com/valyala/fasttemplate v1.2.1 github.com/valyala/fasttemplate v1.2.1
github.com/valyala/gozstd v1.17.0 github.com/valyala/gozstd v1.17.0
github.com/valyala/quicktemplate v1.7.0 github.com/valyala/quicktemplate v1.7.0
golang.org/x/net v0.0.0-20220930213112-107f3e3c3b0b golang.org/x/net v0.0.0-20221004154528-8021a29435af
golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1
golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec golang.org/x/sys v0.0.0-20221006211917-84dc82d7e875
google.golang.org/api v0.98.0 google.golang.org/api v0.98.0
gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v2 v2.4.0
) )
@ -40,8 +40,8 @@ require (
cloud.google.com/go v0.104.0 // indirect cloud.google.com/go v0.104.0 // indirect
cloud.google.com/go/compute v1.10.0 // indirect cloud.google.com/go/compute v1.10.0 // indirect
cloud.google.com/go/iam v0.5.0 // indirect cloud.google.com/go/iam v0.5.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.4 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect github.com/VividCortex/ewma v1.2.0 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.8 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.8 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.12.21 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.12.21 // indirect
@ -93,6 +93,6 @@ require (
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/appengine v1.6.7 // indirect google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220930163606-c98284e70a91 // indirect google.golang.org/genproto v0.0.0-20220930163606-c98284e70a91 // indirect
google.golang.org/grpc v1.49.0 // indirect google.golang.org/grpc v1.50.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect google.golang.org/protobuf v1.28.1 // indirect
) )

32
go.sum
View file

@ -68,13 +68,13 @@ collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v48.2.0+incompatible h1:+t2P1j1r5N6lYgPiiz7ZbEVZFkWjVe9WhHbMm0gg8hw= github.com/Azure/azure-sdk-for-go v48.2.0+incompatible h1:+t2P1j1r5N6lYgPiiz7ZbEVZFkWjVe9WhHbMm0gg8hw=
github.com/Azure/azure-sdk-for-go v48.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v48.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0 h1:sVPhtT2qjO86rTUaWMr4WoES4TkjGnzcioXcnHV9s5k= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.4 h1:pqrAR74b6EoR4kcxF7L7Wg2B8Jgil9UUZtMvxhEFqWo=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.1.4/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.0.0 h1:Yoicul8bnVdQrhDMTHxdEckRGX01XvwXDHUT9zYZ3k0= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 h1:jp0dGvZ7ZK0mgqnTSClMxa5xuRL7NZgHameVYF6BurY= github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1 h1:XUNQ4mw+zJmaA2KXzP9JlQiecy1SI+Eog7xVkPiqIbg=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.1/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 h1:QSdcrd/UFJv6Bp/CfoVf2SrENpFn9P6Yh8yb+xNhYMM= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.0 h1:fe+kSd9btgTTeHeUlMTyEsjoe6L/zd+Q61iWEMPwHmc=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1/go.mod h1:eZ4g6GUvXiGulfIbbhh1Xr4XwUYaYaWMqzGD/284wCA= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.0/go.mod h1:T7nxmZ9i42Dqy7kwnn8AZYNjqxd4TloKXdIbhosHSqo=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
@ -96,7 +96,7 @@ github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6L
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0 h1:WVsrXCnHlDDX8ls+tootqRE87/hL9S/g4ewig9RsD/c= github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 h1:BWe8a+f/t+7KY7zH2mqygeUD0t8hNFXe08p1Pb3/jKE=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
@ -1065,8 +1065,8 @@ golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.0.0-20220930213112-107f3e3c3b0b h1:uKO3Js8lXGjpjdc4J3rqs0/Ex5yDKUGfk43tTYWVLas= golang.org/x/net v0.0.0-20221004154528-8021a29435af h1:wv66FM3rLZGPdxpYL+ApnDe2HzHcTFta3z5nsc13wI4=
golang.org/x/net v0.0.0-20220930213112-107f3e3c3b0b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= golang.org/x/net v0.0.0-20221004154528-8021a29435af/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1088,8 +1088,8 @@ golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1 h1:lxqLZaMad/dJHMFZH0NiNpiEZI/nhgWhe4wgzpE+MuA= golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1 h1:3VPzK7eqH25j7GYw5w6g/GzNRc0/fYtrxz27z1gD4W0=
golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -1200,8 +1200,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec h1:BkDtF2Ih9xZ7le9ndzTA7KJow28VbQW3odyk/8drmuI= golang.org/x/sys v0.0.0-20221006211917-84dc82d7e875 h1:AzgQNqF+FKwyQ5LbVrVqOcuuFB67N47F9+htZYH0wFM=
golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20221006211917-84dc82d7e875/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1479,8 +1479,8 @@ google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw= google.golang.org/grpc v1.50.0 h1:fPVVDxY9w++VjTZsYvXWqEf9Rqar/e+9zYfxKK+W+YU=
google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=

View file

@ -1,5 +1,35 @@
# Release History # Release History
## 1.1.4 (2022-10-06)
### Bugs Fixed
* Don't retry a request if the `Retry-After` delay is greater than the configured `RetryOptions.MaxRetryDelay`.
* `runtime.JoinPaths`: do not unconditionally add a forward slash before the query string
### Other Changes
* Removed logging URL from retry policy as it's redundant.
* Retry policy logs when it exits due to a non-retriable status code.
## 1.1.3 (2022-09-01)
### Bugs Fixed
* Adjusted the initial retry delay to 800ms per the Azure SDK guidelines.
## 1.1.2 (2022-08-09)
### Other Changes
* Fixed various doc bugs.
## 1.1.1 (2022-06-30)
### Bugs Fixed
* Avoid polling when a RELO LRO synchronously terminates.
## 1.1.0 (2022-06-03)
### Other Changes
* The one-second floor for `Frequency` when calling `PollUntilDone()` has been removed when running tests.
## 1.0.0 (2022-05-12) ## 1.0.0 (2022-05-12)
### Features Added ### Features Added

View file

@ -10,11 +10,11 @@ Package azcore implements an HTTP request/response middleware pipeline used by A
The middleware consists of three components. The middleware consists of three components.
- One or more Policy instances. - One or more Policy instances.
- A Transporter instance. - A Transporter instance.
- A Pipeline instance that combines the Policy and Transporter instances. - A Pipeline instance that combines the Policy and Transporter instances.
Implementing the Policy Interface # Implementing the Policy Interface
A Policy can be implemented in two ways; as a first-class function for a stateless Policy, or as A Policy can be implemented in two ways; as a first-class function for a stateless Policy, or as
a method on a type for a stateful Policy. Note that HTTP requests made via the same pipeline share a method on a type for a stateful Policy. Note that HTTP requests made via the same pipeline share
@ -34,53 +34,53 @@ and error instances to its caller.
Template for implementing a stateless Policy: Template for implementing a stateless Policy:
type policyFunc func(*policy.Request) (*http.Response, error) type policyFunc func(*policy.Request) (*http.Response, error)
// Do implements the Policy interface on policyFunc.
func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) { // Do implements the Policy interface on policyFunc.
return pf(req) func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) {
} return pf(req)
}
func NewMyStatelessPolicy() policy.Policy { func NewMyStatelessPolicy() policy.Policy {
return policyFunc(func(req *policy.Request) (*http.Response, error) { return policyFunc(func(req *policy.Request) (*http.Response, error) {
// TODO: mutate/process Request here // TODO: mutate/process Request here
// forward Request to next Policy & get Response/error // forward Request to next Policy & get Response/error
resp, err := req.Next() resp, err := req.Next()
// TODO: mutate/process Response/error here // TODO: mutate/process Response/error here
// return Response/error to previous Policy // return Response/error to previous Policy
return resp, err return resp, err
}) })
} }
Template for implementing a stateful Policy: Template for implementing a stateful Policy:
type MyStatefulPolicy struct { type MyStatefulPolicy struct {
// TODO: add configuration/setting fields here // TODO: add configuration/setting fields here
} }
// TODO: add initialization args to NewMyStatefulPolicy() // TODO: add initialization args to NewMyStatefulPolicy()
func NewMyStatefulPolicy() policy.Policy { func NewMyStatefulPolicy() policy.Policy {
return &MyStatefulPolicy{ return &MyStatefulPolicy{
// TODO: initialize configuration/setting fields here // TODO: initialize configuration/setting fields here
} }
} }
func (p *MyStatefulPolicy) Do(req *policy.Request) (resp *http.Response, err error) { func (p *MyStatefulPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
// TODO: mutate/process Request here // TODO: mutate/process Request here
// forward Request to next Policy & get Response/error // forward Request to next Policy & get Response/error
resp, err := req.Next() resp, err := req.Next()
// TODO: mutate/process Response/error here // TODO: mutate/process Response/error here
// return Response/error to previous Policy // return Response/error to previous Policy
return resp, err return resp, err
} }
Implementing the Transporter Interface # Implementing the Transporter Interface
The Transporter interface is responsible for sending the HTTP request and returning the corresponding The Transporter interface is responsible for sending the HTTP request and returning the corresponding
HTTP response or error. The Transporter is invoked by the last Policy in the chain. The default Transporter HTTP response or error. The Transporter is invoked by the last Policy in the chain. The default Transporter
@ -88,66 +88,66 @@ implementation uses a shared http.Client from the standard library.
The same stateful/stateless rules for Policy implementations apply to Transporter implementations. The same stateful/stateless rules for Policy implementations apply to Transporter implementations.
Using Policy and Transporter Instances Via a Pipeline # Using Policy and Transporter Instances Via a Pipeline
To use the Policy and Transporter instances, an application passes them to the runtime.NewPipeline function. To use the Policy and Transporter instances, an application passes them to the runtime.NewPipeline function.
func NewPipeline(transport Transporter, policies ...Policy) Pipeline func NewPipeline(transport Transporter, policies ...Policy) Pipeline
The specified Policy instances form a chain and are invoked in the order provided to NewPipeline The specified Policy instances form a chain and are invoked in the order provided to NewPipeline
followed by the Transporter. followed by the Transporter.
Once the Pipeline has been created, create a runtime.Request instance and pass it to Pipeline's Do method. Once the Pipeline has been created, create a runtime.Request instance and pass it to Pipeline's Do method.
func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error)
func (p Pipeline) Do(req *Request) (*http.Request, error) func (p Pipeline) Do(req *Request) (*http.Request, error)
The Pipeline.Do method sends the specified Request through the chain of Policy and Transporter The Pipeline.Do method sends the specified Request through the chain of Policy and Transporter
instances. The response/error is then sent through the same chain of Policy instances in reverse instances. The response/error is then sent through the same chain of Policy instances in reverse
order. For example, assuming there are Policy types PolicyA, PolicyB, and PolicyC along with order. For example, assuming there are Policy types PolicyA, PolicyB, and PolicyC along with
TransportA. TransportA.
pipeline := NewPipeline(TransportA, PolicyA, PolicyB, PolicyC) pipeline := NewPipeline(TransportA, PolicyA, PolicyB, PolicyC)
The flow of Request and Response looks like the following: The flow of Request and Response looks like the following:
policy.Request -> PolicyA -> PolicyB -> PolicyC -> TransportA -----+ policy.Request -> PolicyA -> PolicyB -> PolicyC -> TransportA -----+
| |
HTTP(S) endpoint HTTP(S) endpoint
| |
caller <--------- PolicyA <- PolicyB <- PolicyC <- http.Response-+ caller <--------- PolicyA <- PolicyB <- PolicyC <- http.Response-+
Creating a Request Instance # Creating a Request Instance
The Request instance passed to Pipeline's Do method is a wrapper around an *http.Request. It also The Request instance passed to Pipeline's Do method is a wrapper around an *http.Request. It also
contains some internal state and provides various convenience methods. You create a Request instance contains some internal state and provides various convenience methods. You create a Request instance
by calling the runtime.NewRequest function: by calling the runtime.NewRequest function:
func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error)
If the Request should contain a body, call the SetBody method. If the Request should contain a body, call the SetBody method.
func (req *Request) SetBody(body ReadSeekCloser, contentType string) error func (req *Request) SetBody(body ReadSeekCloser, contentType string) error
A seekable stream is required so that upon retry, the retry Policy instance can seek the stream A seekable stream is required so that upon retry, the retry Policy instance can seek the stream
back to the beginning before retrying the network request and re-uploading the body. back to the beginning before retrying the network request and re-uploading the body.
Sending an Explicit Null # Sending an Explicit Null
Operations like JSON-MERGE-PATCH send a JSON null to indicate a value should be deleted. Operations like JSON-MERGE-PATCH send a JSON null to indicate a value should be deleted.
{ {
"delete-me": null "delete-me": null
} }
This requirement conflicts with the SDK's default marshalling that specifies "omitempty" as This requirement conflicts with the SDK's default marshalling that specifies "omitempty" as
a means to resolve the ambiguity between a field to be excluded and its zero-value. a means to resolve the ambiguity between a field to be excluded and its zero-value.
type Widget struct { type Widget struct {
Name *string `json:",omitempty"` Name *string `json:",omitempty"`
Count *int `json:",omitempty"` Count *int `json:",omitempty"`
} }
In the above example, Name and Count are defined as pointer-to-type to disambiguate between In the above example, Name and Count are defined as pointer-to-type to disambiguate between
a missing value (nil) and a zero-value (0) which might have semantic differences. a missing value (nil) and a zero-value (0) which might have semantic differences.
@ -157,18 +157,18 @@ a Widget's count, one simply specifies the new value for Count, leaving Name nil
To fulfill the requirement for sending a JSON null, the NullValue() function can be used. To fulfill the requirement for sending a JSON null, the NullValue() function can be used.
w := Widget{ w := Widget{
Count: azcore.NullValue[*int](), Count: azcore.NullValue[*int](),
} }
This sends an explict "null" for Count, indicating that any current value for Count should be deleted. This sends an explict "null" for Count, indicating that any current value for Count should be deleted.
Processing the Response # Processing the Response
When the HTTP response is received, the *http.Response is returned directly. Each Policy instance When the HTTP response is received, the *http.Response is returned directly. Each Policy instance
can inspect/mutate the *http.Response. can inspect/mutate the *http.Response.
Built-in Logging # Built-in Logging
To enable logging, set environment variable AZURE_SDK_GO_LOGGING to "all" before executing your program. To enable logging, set environment variable AZURE_SDK_GO_LOGGING to "all" before executing your program.
@ -178,40 +178,40 @@ own synchronization to handle concurrent invocations.
See the docs for the log package for further details. See the docs for the log package for further details.
Pageable Operations # Pageable Operations
Pageable operations return potentially large data sets spread over multiple GET requests. The result of Pageable operations return potentially large data sets spread over multiple GET requests. The result of
each GET is a "page" of data consisting of a slice of items. each GET is a "page" of data consisting of a slice of items.
Pageable operations can be identified by their New*Pager naming convention and return type of *runtime.Pager[T]. Pageable operations can be identified by their New*Pager naming convention and return type of *runtime.Pager[T].
func (c *WidgetClient) NewListWidgetsPager(o *Options) *runtime.Pager[PageResponse] func (c *WidgetClient) NewListWidgetsPager(o *Options) *runtime.Pager[PageResponse]
The call to WidgetClient.NewListWidgetsPager() returns an instance of *runtime.Pager[T] for fetching pages The call to WidgetClient.NewListWidgetsPager() returns an instance of *runtime.Pager[T] for fetching pages
and determining if there are more pages to fetch. No IO calls are made until the NextPage() method is invoked. and determining if there are more pages to fetch. No IO calls are made until the NextPage() method is invoked.
pager := widgetClient.NewListWidgetsPager(nil) pager := widgetClient.NewListWidgetsPager(nil)
for pager.More() { for pager.More() {
page, err := pager.NextPage(context.TODO()) page, err := pager.NextPage(context.TODO())
// handle err // handle err
for _, widget := range page.Values { for _, widget := range page.Values {
// process widget // process widget
} }
} }
Long-Running Operations # Long-Running Operations
Long-running operations (LROs) are operations consisting of an initial request to start the operation followed Long-running operations (LROs) are operations consisting of an initial request to start the operation followed
by polling to determine when the operation has reached a terminal state. An LRO's terminal state is one by polling to determine when the operation has reached a terminal state. An LRO's terminal state is one
of the following values. of the following values.
* Succeeded - the LRO completed successfully - Succeeded - the LRO completed successfully
* Failed - the LRO failed to complete - Failed - the LRO failed to complete
* Canceled - the LRO was canceled - Canceled - the LRO was canceled
LROs can be identified by their Begin* prefix and their return type of *runtime.Poller[T]. LROs can be identified by their Begin* prefix and their return type of *runtime.Poller[T].
func (c *WidgetClient) BeginCreateOrUpdate(ctx context.Context, w Widget, o *Options) (*runtime.Poller[Response], error) func (c *WidgetClient) BeginCreateOrUpdate(ctx context.Context, w Widget, o *Options) (*runtime.Poller[Response], error)
When a call to WidgetClient.BeginCreateOrUpdate() returns a nil error, it means that the LRO has started. When a call to WidgetClient.BeginCreateOrUpdate() returns a nil error, it means that the LRO has started.
It does _not_ mean that the widget has been created or updated (or failed to be created/updated). It does _not_ mean that the widget has been created or updated (or failed to be created/updated).
@ -219,11 +219,11 @@ It does _not_ mean that the widget has been created or updated (or failed to be
The *runtime.Poller[T] provides APIs for determining the state of the LRO. To wait for the LRO to complete, The *runtime.Poller[T] provides APIs for determining the state of the LRO. To wait for the LRO to complete,
call the PollUntilDone() method. call the PollUntilDone() method.
poller, err := widgetClient.BeginCreateOrUpdate(context.TODO(), Widget{}, nil) poller, err := widgetClient.BeginCreateOrUpdate(context.TODO(), Widget{}, nil)
// handle err // handle err
result, err := poller.PollUntilDone(context.TODO(), nil) result, err := poller.PollUntilDone(context.TODO(), nil)
// handle err // handle err
// use result // use result
The call to PollUntilDone() will block the current goroutine until the LRO has reached a terminal state or the The call to PollUntilDone() will block the current goroutine until the LRO has reached a terminal state or the
context is canceled/timed out. context is canceled/timed out.
@ -232,22 +232,22 @@ Note that LROs can take anywhere from several seconds to several minutes. The d
this variant behavior, pollers do _not_ have a preconfigured time-out. Use a context with the appropriate cancellation this variant behavior, pollers do _not_ have a preconfigured time-out. Use a context with the appropriate cancellation
mechanism as required. mechanism as required.
Resume Tokens # Resume Tokens
Pollers provide the ability to serialize their state into a "resume token" which can be used by another process to Pollers provide the ability to serialize their state into a "resume token" which can be used by another process to
recreate the poller. This is achieved via the runtime.Poller[T].ResumeToken() method. recreate the poller. This is achieved via the runtime.Poller[T].ResumeToken() method.
token, err := poller.ResumeToken() token, err := poller.ResumeToken()
// handle error // handle error
Note that a token can only be obtained for a poller that's in a non-terminal state. Also note that any subsequent calls Note that a token can only be obtained for a poller that's in a non-terminal state. Also note that any subsequent calls
to poller.Poll() might change the poller's state. In this case, a new token should be created. to poller.Poll() might change the poller's state. In this case, a new token should be created.
After the token has been obtained, it can be used to recreate an instance of the originating poller. After the token has been obtained, it can be used to recreate an instance of the originating poller.
poller, err := widgetClient.BeginCreateOrUpdate(nil, Widget{}, &Options{ poller, err := widgetClient.BeginCreateOrUpdate(nil, Widget{}, &Options{
ResumeToken: token, ResumeToken: token,
}) })
When resuming a poller, no IO is performed, and zero-value arguments can be used for everything but the Options.ResumeToken. When resuming a poller, no IO is performed, and zero-value arguments can be used for everything but the Options.ResumeToken.

View file

@ -8,7 +8,6 @@ package exported
import ( import (
"io" "io"
"io/ioutil"
"net/http" "net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared"
@ -51,7 +50,7 @@ func Payload(resp *http.Response) ([]byte, error) {
if buf, ok := resp.Body.(*shared.NopClosingBytesReader); ok { if buf, ok := resp.Body.(*shared.NopClosingBytesReader); ok {
return buf.Bytes(), nil return buf.Bytes(), nil
} }
bytesBody, err := ioutil.ReadAll(resp.Body) bytesBody, err := io.ReadAll(resp.Body)
resp.Body.Close() resp.Body.Close()
if err != nil { if err != nil {
return nil, err return nil, err

View file

@ -71,6 +71,13 @@ func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.Fi
if !pollers.IsValidURL(asyncURL) { if !pollers.IsValidURL(asyncURL) {
return nil, fmt.Errorf("invalid polling URL %s", asyncURL) return nil, fmt.Errorf("invalid polling URL %s", asyncURL)
} }
// check for provisioning state. if the operation is a RELO
// and terminates synchronously this will prevent extra polling.
// it's ok if there's no provisioning state.
state, _ := pollers.GetProvisioningState(resp)
if state == "" {
state = pollers.StatusInProgress
}
p := &Poller[T]{ p := &Poller[T]{
pl: pl, pl: pl,
resp: resp, resp: resp,
@ -79,7 +86,7 @@ func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.Fi
OrigURL: resp.Request.URL.String(), OrigURL: resp.Request.URL.String(),
Method: resp.Request.Method, Method: resp.Request.Method,
FinalState: finalState, FinalState: finalState,
CurState: pollers.StatusInProgress, CurState: state,
} }
return p, nil return p, nil
} }

View file

@ -64,12 +64,19 @@ func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) {
if !pollers.IsValidURL(locURL) { if !pollers.IsValidURL(locURL) {
return nil, fmt.Errorf("invalid polling URL %s", locURL) return nil, fmt.Errorf("invalid polling URL %s", locURL)
} }
// check for provisioning state. if the operation is a RELO
// and terminates synchronously this will prevent extra polling.
// it's ok if there's no provisioning state.
state, _ := pollers.GetProvisioningState(resp)
if state == "" {
state = pollers.StatusInProgress
}
return &Poller[T]{ return &Poller[T]{
pl: pl, pl: pl,
resp: resp, resp: resp,
Type: kind, Type: kind,
PollURL: locURL, PollURL: locURL,
CurState: pollers.StatusInProgress, CurState: state,
}, nil }, nil
} }

View file

@ -30,5 +30,5 @@ const (
Module = "azcore" Module = "azcore"
// Version is the semantic version (see http://semver.org) of this module. // Version is the semantic version (see http://semver.org) of this module.
Version = "v1.0.0" Version = "v1.1.4"
) )

View file

@ -69,7 +69,8 @@ type LogOptions struct {
} }
// RetryOptions configures the retry policy's behavior. // RetryOptions configures the retry policy's behavior.
// Call NewRetryOptions() to create an instance with default values. // Zero-value fields will have their specified default values applied during use.
// This allows for modification of a subset of fields.
type RetryOptions struct { type RetryOptions struct {
// MaxRetries specifies the maximum number of attempts a failed operation will be retried // MaxRetries specifies the maximum number of attempts a failed operation will be retried
// before producing an error. // before producing an error.
@ -82,6 +83,7 @@ type RetryOptions struct {
TryTimeout time.Duration TryTimeout time.Duration
// RetryDelay specifies the initial amount of delay to use before retrying an operation. // RetryDelay specifies the initial amount of delay to use before retrying an operation.
// The value is used only if the HTTP response does not contain a Retry-After header.
// The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay. // The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay.
// The default value is four seconds. A value less than zero means no delay between retries. // The default value is four seconds. A value less than zero means no delay between retries.
RetryDelay time.Duration RetryDelay time.Duration
@ -92,8 +94,15 @@ type RetryOptions struct {
MaxRetryDelay time.Duration MaxRetryDelay time.Duration
// StatusCodes specifies the HTTP status codes that indicate the operation should be retried. // StatusCodes specifies the HTTP status codes that indicate the operation should be retried.
// The default value is the status codes in StatusCodesForRetry. // A nil slice will use the following values.
// Specifying an empty slice will cause retries to happen only for transport errors. // http.StatusRequestTimeout 408
// http.StatusTooManyRequests 429
// http.StatusInternalServerError 500
// http.StatusBadGateway 502
// http.StatusServiceUnavailable 503
// http.StatusGatewayTimeout 504
// Specifying values will replace the default values.
// Specifying an empty slice will disable retries for HTTP status codes.
StatusCodes []int StatusCodes []int
} }

View file

@ -9,7 +9,7 @@ package runtime
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"io/ioutil" "io"
"net/http" "net/http"
"sort" "sort"
"strings" "strings"
@ -210,7 +210,7 @@ func writeReqBody(req *policy.Request, b *bytes.Buffer) error {
if ct := req.Raw().Header.Get(shared.HeaderContentType); !shouldLogBody(b, ct) { if ct := req.Raw().Header.Get(shared.HeaderContentType); !shouldLogBody(b, ct) {
return nil return nil
} }
body, err := ioutil.ReadAll(req.Raw().Body) body, err := io.ReadAll(req.Raw().Body)
if err != nil { if err != nil {
fmt.Fprintf(b, " Failed to read request body: %s\n", err.Error()) fmt.Fprintf(b, " Failed to read request body: %s\n", err.Error())
return err return err

View file

@ -38,11 +38,12 @@ func setDefaults(o *policy.RetryOptions) {
o.MaxRetryDelay = math.MaxInt64 o.MaxRetryDelay = math.MaxInt64
} }
if o.RetryDelay == 0 { if o.RetryDelay == 0 {
o.RetryDelay = 4 * time.Second o.RetryDelay = 800 * time.Millisecond
} else if o.RetryDelay < 0 { } else if o.RetryDelay < 0 {
o.RetryDelay = 0 o.RetryDelay = 0
} }
if o.StatusCodes == nil { if o.StatusCodes == nil {
// NOTE: if you change this list, you MUST update the docs in policy/policy.go
o.StatusCodes = []int{ o.StatusCodes = []int{
http.StatusRequestTimeout, // 408 http.StatusRequestTimeout, // 408
http.StatusTooManyRequests, // 429 http.StatusTooManyRequests, // 429
@ -106,7 +107,7 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
try := int32(1) try := int32(1)
for { for {
resp = nil // reset resp = nil // reset
log.Writef(log.EventRetryPolicy, "\n=====> Try=%d %s %s", try, req.Raw().Method, req.Raw().URL.String()) log.Writef(log.EventRetryPolicy, "=====> Try=%d", try)
// For each try, seek to the beginning of the Body stream. We do this even for the 1st try because // For each try, seek to the beginning of the Body stream. We do this even for the 1st try because
// the stream may not be at offset 0 when we first get it and we want the same behavior for the // the stream may not be at offset 0 when we first get it and we want the same behavior for the
@ -145,6 +146,7 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
if err == nil && !HasStatusCode(resp, options.StatusCodes...) { if err == nil && !HasStatusCode(resp, options.StatusCodes...) {
// if there is no error and the response code isn't in the list of retry codes then we're done. // if there is no error and the response code isn't in the list of retry codes then we're done.
log.Write(log.EventRetryPolicy, "exit due to non-retriable status code")
return return
} else if ctxErr := req.Raw().Context().Err(); ctxErr != nil { } else if ctxErr := req.Raw().Context().Err(); ctxErr != nil {
// don't retry if the parent context has been cancelled or its deadline exceeded // don't retry if the parent context has been cancelled or its deadline exceeded
@ -167,14 +169,19 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
return return
} }
// drain before retrying so nothing is leaked
Drain(resp)
// use the delay from retry-after if available // use the delay from retry-after if available
delay := shared.RetryAfter(resp) delay := shared.RetryAfter(resp)
if delay <= 0 { if delay <= 0 {
delay = calcDelay(options, try) delay = calcDelay(options, try)
} else if delay > options.MaxRetryDelay {
// the retry-after delay exceeds the the cap so don't retry
log.Writef(log.EventRetryPolicy, "Retry-After delay %s exceeds MaxRetryDelay of %s", delay, options.MaxRetryDelay)
return
} }
// drain before retrying so nothing is leaked
Drain(resp)
log.Writef(log.EventRetryPolicy, "End Try #%d, Delay=%v", try, delay) log.Writef(log.EventRetryPolicy, "End Try #%d, Delay=%v", try, delay)
select { select {
case <-time.After(delay): case <-time.After(delay):

View file

@ -10,6 +10,7 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"errors" "errors"
"flag"
"fmt" "fmt"
"net/http" "net/http"
"time" "time"
@ -210,7 +211,8 @@ func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOpt
cp.Frequency = 30 * time.Second cp.Frequency = 30 * time.Second
} }
if cp.Frequency < time.Second { // skip the floor check when executing tests so they don't take so long
if isTest := flag.Lookup("test.v"); isTest == nil && cp.Frequency < time.Second {
return *new(T), errors.New("polling frequency minimum is one second") return *new(T), errors.New("polling frequency minimum is one second")
} }

View file

@ -15,6 +15,7 @@ import (
"fmt" "fmt"
"io" "io"
"mime/multipart" "mime/multipart"
"path"
"reflect" "reflect"
"strings" "strings"
"time" "time"
@ -37,6 +38,7 @@ const (
) )
// NewRequest creates a new policy.Request with the specified input. // NewRequest creates a new policy.Request with the specified input.
// The endpoint MUST be properly encoded before calling this function.
func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*policy.Request, error) { func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*policy.Request, error) {
return exported.NewRequest(ctx, httpMethod, endpoint) return exported.NewRequest(ctx, httpMethod, endpoint)
} }
@ -55,19 +57,23 @@ func JoinPaths(root string, paths ...string) string {
root, qps = splitPath[0], splitPath[1] root, qps = splitPath[0], splitPath[1]
} }
for i := 0; i < len(paths); i++ { p := path.Join(paths...)
root = strings.TrimRight(root, "/") // path.Join will remove any trailing slashes.
paths[i] = strings.TrimLeft(paths[i], "/") // if one was provided, preserve it.
root += "/" + paths[i] if strings.HasSuffix(paths[len(paths)-1], "/") && !strings.HasSuffix(p, "/") {
p += "/"
} }
if qps != "" { if qps != "" {
if !strings.HasSuffix(root, "/") { p = p + "?" + qps
root += "/"
}
return root + "?" + qps
} }
return root
if strings.HasSuffix(root, "/") && strings.HasPrefix(p, "/") {
root = root[:len(root)-1]
} else if !strings.HasSuffix(root, "/") && !strings.HasPrefix(p, "/") {
p = "/" + p
}
return root + p
} }
// EncodeByteArray will base-64 encode the byte slice v. // EncodeByteArray will base-64 encode the byte slice v.

View file

@ -13,7 +13,6 @@ import (
"encoding/xml" "encoding/xml"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
@ -86,7 +85,7 @@ func UnmarshalAsXML(resp *http.Response, v interface{}) error {
// Drain reads the response body to completion then closes it. The bytes read are discarded. // Drain reads the response body to completion then closes it. The bytes read are discarded.
func Drain(resp *http.Response) { func Drain(resp *http.Response) {
if resp != nil && resp.Body != nil { if resp != nil && resp.Body != nil {
_, _ = io.Copy(ioutil.Discard, resp.Body) _, _ = io.Copy(io.Discard, resp.Body)
resp.Body.Close() resp.Body.Close()
} }
} }

View file

@ -15,7 +15,7 @@ import (
// Caller returns the file and line number of a frame on the caller's stack. // Caller returns the file and line number of a frame on the caller's stack.
// If the funtion fails an empty string is returned. // If the funtion fails an empty string is returned.
// skipFrames - the number of frames to skip when determining the caller. // skipFrames - the number of frames to skip when determining the caller.
// Passing a value of 0 will return the immediate caller of this function. // Passing a value of 0 will return the immediate caller of this function.
func Caller(skipFrames int) string { func Caller(skipFrames int) string {
if pc, file, line, ok := runtime.Caller(skipFrames + 1); ok { if pc, file, line, ok := runtime.Caller(skipFrames + 1); ok {
// the skipFrames + 1 is to skip ourselves // the skipFrames + 1 is to skip ourselves

View file

@ -1,26 +1,47 @@
# Release History # Release History
## 0.5.0 (2022-09-29)
### Breaking Changes
* Complete architectural change for better user experience. Please view the [README](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob#readme)
### Features Added
* Added [UserDelegationCredential](https://learn.microsoft.com/rest/api/storageservices/create-user-delegation-sas) which resolves [#18976](https://github.com/Azure/azure-sdk-for-go/issues/18976), [#16916](https://github.com/Azure/azure-sdk-for-go/issues/16916), [#18977](https://github.com/Azure/azure-sdk-for-go/issues/18977)
* Added [Restore Container API](https://learn.microsoft.com/rest/api/storageservices/restore-container).
### Bugs Fixed
* Fixed issue [#18767](https://github.com/Azure/azure-sdk-for-go/issues/18767)
* Fix deadlock when error writes are slow [#16937](https://github.com/Azure/azure-sdk-for-go/pull/16937)
## 0.4.1 (2022-05-12) ## 0.4.1 (2022-05-12)
### Other Changes ### Other Changes
* Updated to latest `azcore` and `internal` modules * Updated to latest `azcore` and `internal` modules
## 0.4.0 (2022-04-19) ## 0.4.0 (2022-04-19)
### Breaking Changes ### Breaking Changes
* Fixed Issue #17150 : Renaming/refactoring high level methods. * Fixed Issue #17150 : Renaming/refactoring high level methods.
* Fixed Issue #16972 : Constructors should return clients by reference. * Fixed Issue #16972 : Constructors should return clients by reference.
* Renaming the options bags to match the naming convention same as that of response. The behaviour of options bags remains the same. * Renaming the options bags to match the naming convention same as that of response. The behaviour of options bags
remains the same.
### Bugs Fixed ### Bugs Fixed
* Fixed Issue #17515 : SetTags options bag missing leaseID. * Fixed Issue #17515 : SetTags options bag missing leaseID.
* Fixed Issue #17423 : Drop "Type" suffix from `GeoReplicationStatusType`. * Fixed Issue #17423 : Drop "Type" suffix from `GeoReplicationStatusType`.
* Fixed Issue #17335 : Nil pointer exception when passing nil options bag in `ListBlobsFlat` API call. * Fixed Issue #17335 : Nil pointer exception when passing nil options bag in `ListBlobsFlat` API call.
* Fixed Issue #17188 : `BlobURLParts` not supporting VersionID * Fixed Issue #17188 : `BlobURLParts` not supporting VersionID
* Fixed Issue #17152 , Issue #17131 , Issue #17061 : `UploadStreamToBlockBlob` / `UploadStreamToBlockBlob` methods ignoring the options bag. * Fixed Issue #17152 , Issue #17131 , Issue #17061 : `UploadStreamToBlockBlob` / `UploadStreamToBlockBlob` methods
ignoring the options bag.
* Fixed Issue #16920 : Fixing error handling example. * Fixed Issue #16920 : Fixing error handling example.
* Fixed Issue #16786 : Refactoring of autorest code generation definition and adding necessary transformations. * Fixed Issue #16786 : Refactoring of autorest code generation definition and adding necessary transformations.
* Fixed Issue #16679 : Response parsing issue in List blobs API. * Fixed Issue #16679 : Response parsing issue in List blobs API.
## 0.3.0 (2022-02-09) ## 0.3.0 (2022-02-09)

View file

@ -1,397 +1,274 @@
# Azure Blob Storage SDK for Go # Azure Blob Storage SDK for Go
## Introduction > Server Version: 2020-10-02
The Microsoft Azure Storage SDK for Go allows you to build applications that takes advantage of Azure's scalable cloud Azure Blob storage is Microsoft's object storage solution for the cloud. Blob
storage. This is the new beta client module for Azure Blob Storage, which follows storage is optimized for storing massive amounts of unstructured data.
our [Azure SDK Design Guidelines for Go](https://azure.github.io/azure-sdk/golang_introduction.html) and replaces the Unstructured data is data that does not adhere to a particular data model or
previous beta [azblob package](https://github.com/azure/azure-storage-blob-go). definition, such as text or binary data.
## Getting Started [Source code][source] | [API reference documentation][docs] | [REST API documentation][rest_docs] | [Product documentation][product_docs]
The Azure Blob SDK can access an Azure Storage account. ## Getting started
### Prerequisites
* Go versions 1.18 or higher
* You must have an [Azure storage account][azure_storage_account]. If you need to create one, you can use
the [Azure Cloud Shell](https://shell.azure.com/bash) to create one with these commands (replace `my-resource-group`
and `mystorageaccount` with your own unique names):
(Optional) if you want a new resource group to hold the Storage Account:
```
az group create --name my-resource-group --location westus2
```
Create the storage account:
```
az storage account create --resource-group my-resource-group --name mystorageaccount
```
The storage account name can be queried with:
```
az storage account show -n mystorageaccount -g my-resource-group --query "primaryEndpoints.blob"
```
You can set this as an environment variable with:
```bash
# PowerShell
$ENV:AZURE_STORAGE_ACCOUNT_NAME="mystorageaccount"
# bash
export AZURE_STORAGE_ACCOUNT_NAME="mystorageaccount"
```
Query your storage account keys:
```
az storage account keys list --resource-group my-resource-group -n mystorageaccount
```
Output:
```json
[
{
"creationTime": "2022-02-07T17:18:44.088870+00:00",
"keyName": "key1",
"permissions": "FULL",
"value": "..."
},
{
"creationTime": "2022-02-07T17:18:44.088870+00:00",
"keyName": "key2",
"permissions": "FULL",
"value": "..."
}
]
```
```bash
# PowerShell
$ENV:AZURE_STORAGE_ACCOUNT_KEY="<mystorageaccountkey>"
# Bash
export AZURE_STORAGE_ACCOUNT_KEY="<mystorageaccountkey>"
```
> You can obtain your account key from the Azure Portal under the "Access Keys" section on the left-hand pane of your storage account.
#### Create account
* To create a new Storage account, you can use [Azure Portal][azure_portal_create_account]
, [Azure PowerShell][azure_powershell_create_account], or [Azure CLI][azure_cli_create_account].
### Install the package ### Install the package
* Install the Azure Blob Storage client module for Go with `go get`: Install the Azure Blob Storage SDK for Go with [go get][goget]:
```bash ```Powershell
go get github.com/Azure/azure-sdk-for-go/sdk/storage/azblob go get github.com/Azure/azure-sdk-for-go/sdk/storage/azblob
``` ```
> Optional: If you are going to use AAD authentication, install the `azidentity` package: If you're going to authenticate with Azure Active Directory (recommended), install the [azidentity][azidentity] module.
```Powershell
```bash
go get github.com/Azure/azure-sdk-for-go/sdk/azidentity go get github.com/Azure/azure-sdk-for-go/sdk/azidentity
``` ```
#### Create the client ### Prerequisites
`azblob` allows you to interact with three types of resources :- A supported [Go][godevdl] version (the Azure SDK supports the two most recent Go releases).
* [Azure storage accounts][azure_storage_account]. You need an [Azure subscription][azure_sub] and a
* [Containers](https://azure.microsoft.com/en-in/overview/what-is-a-container/#overview) within those storage accounts. [Storage Account][storage_account_docs] to use this package.
* [Blobs](https://azure.microsoft.com/en-in/services/storage/blobs/#overview) (block blobs/ page blobs/ append blobs)
within those containers.
Interaction with these resources starts with an instance of a [client](#clients). To create a client object, you will To create a new Storage Account, you can use the [Azure Portal][storage_account_create_portal],
need the account's blob service endpoint URL and a credential that allows you to access the account. The `endpoint` can [Azure PowerShell][storage_account_create_ps], or the [Azure CLI][storage_account_create_cli].
be found on the page for your storage account in the [Azure Portal][azure_portal_account_url] under the "Access Keys" Here's an example using the Azure CLI:
section or by running the following Azure CLI command:
```bash ```Powershell
# Get the blob service URL for the account az storage account create --name MyStorageAccount --resource-group MyResourceGroup --location westus --sku Standard_LRS
az storage account show -n mystorageaccount -g my-resource-group --query "primaryEndpoints.blob"
``` ```
Once you have the account URL, it can be used to create the service client: ### Authenticate the client
```golang In order to interact with the Azure Blob Storage service, you'll need to create an instance of the `azblob.Client` type. The [azidentity][azidentity] module makes it easy to add Azure Active Directory support for authenticating Azure SDK clients with their corresponding Azure services.
cred, err := azblob.NewSharedKeyCredential("myAccountName", "myAccountKey")
handle(err)
serviceClient, err := azblob.NewServiceClientWithSharedKey("https://<myAccountName>.blob.core.windows.net/", cred, nil)
handle(err)
```
For more information about blob service URL's and how to configure custom domain names for Azure Storage check out
the [official documentation][azure_portal_account_url]
#### Types of credentials
The azblob clients support authentication via Shared Key Credential, Connection String, Shared Access Signature, or any
of the `azidentity` types that implement the `azcore.TokenCredential` interface.
##### 1. Creating the client from a shared key
To use an account [shared key][azure_shared_key] (aka account key or access key), provide the key as a string. This can
be found in your storage account in the [Azure Portal][azure_portal_account_url] under the "Access Keys" section or by
running the following Azure CLI command:
```bash
az storage account keys list -g my-resource-group -n mystorageaccount
```
Use Shared Key authentication as the credential parameter to authenticate the client:
```golang
credential, err := azblob.NewSharedKeyCredential("accountName", "accountKey")
handle(err)
serviceClient, err := azblob.NewServiceClientWithSharedKey("https://<myAccountName>.blob.core.windows.net/", credential, nil)
handle(err)
```
##### 2. Creating the client from a connection string
You can use connection string, instead of providing the account URL and credential separately, for authentication as
well. To do this, pass the connection string to the client's `NewServiceClientFromConnectionString` method. The
connection string can be found in your storage account in the [Azure Portal][azure_portal_account_url] under the "Access
Keys" section or with the following Azure CLI command:
```bash
az storage account show-connection-string -g my-resource-group -n mystorageaccount
```
```golang
connStr := "DefaultEndpointsProtocol=https;AccountName=<myAccountName>;AccountKey=<myAccountKey>;EndpointSuffix=core.windows.net"
serviceClient, err := azblob.NewServiceClientFromConnectionString(connStr, nil)
```
##### 3. Creating the client from a SAS token
To use a [shared access signature (SAS) token][azure_sas_token], provide the token as a string. You can generate a SAS
token from the Azure Portal
under [Shared access signature](https://docs.microsoft.com/rest/api/storageservices/create-service-sas) or use
the `ServiceClient.GetSASToken` or `ContainerClient.GetSASToken()` methods.
```golang
credential, err := azblob.NewSharedKeyCredential("accountName", "accountKey")
handle(err)
serviceClient, err := azblob.NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), credential, nil)
handle(err)
// Provide the convenience function with relevant info (services, resource types, permissions, and duration)
// The SAS token will be valid from this moment onwards.
accountSAS, err := serviceClient.GetSASToken(AccountSASResourceTypes{Object: true, Service: true, Container: true},
AccountSASPermissions{Read: true, List: true}, AccountSASServices{Blob: true}, time.Now(), time.Now().Add(48*time.Hour))
handle(err)
sasURL := fmt.Sprintf("https://%s.blob.core.windows.net/?%s", accountName, accountSAS)
// The sasURL can be used to authenticate a client without need for a credential
serviceClient, err = NewServiceClientWithNoCredential(sasURL, nil)
handle(err)
```
### Clients
Three different clients are provided to interact with the various components of the Blob Service:
1. **`ServiceClient`**
* Get and set account settings.
* Query, create, and delete containers within the account.
2. **`ContainerClient`**
* Get and set container access settings, properties, and metadata.
* Create, delete, and query blobs within the container.
* `ContainerLeaseClient` to support container lease management.
3. **`BlobClient`**
* `AppendBlobClient`, `BlockBlobClient`, and `PageBlobClient`
* Get and set blob properties.
* Perform CRUD operations on a given blob.
* `BlobLeaseClient` to support blob lease management.
### Example
```go ```go
// Use your storage account's name and key to create a credential object, used to access your account. // create a credential for authenticating with Azure Active Directory
// You can obtain these details from the Azure Portal. cred, err := azidentity.NewDefaultAzureCredential(nil)
accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") // TODO: handle err
if !ok {
handle(errors.New("AZURE_STORAGE_ACCOUNT_NAME could not be found")) // create an azblob.Client for the specified storage account that uses the above credential
client, err := azblob.NewClient("https://MYSTORAGEACCOUNT.blob.core.windows.net/", cred, nil)
// TODO: handle err
```
Learn more about enabling Azure Active Directory for authentication with Azure Storage in [our documentation][storage_ad] and [our samples](#next-steps).
## Key concepts
Blob storage is designed for:
- Serving images or documents directly to a browser.
- Storing files for distributed access.
- Streaming video and audio.
- Writing to log files.
- Storing data for backup and restore, disaster recovery, and archiving.
- Storing data for analysis by an on-premises or Azure-hosted service.
Blob storage offers three types of resources:
- The _storage account_
- One or more _containers_ in a storage account
- One ore more _blobs_ in a container
Instances of the `azblob.Client` type provide methods for manipulating containers and blobs within a storage account.
The storage account is specified when the `azblob.Client` is constructed.
Use the appropriate client constructor function for the authentication mechanism you wish to use.
Learn more about options for authentication _(including Connection Strings, Shared Key, Shared Access Signatures (SAS), Azure Active Directory (AAD), and anonymous public access)_ [in our examples.](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage/azblob/examples_test.go)
### Goroutine safety
We guarantee that all client instance methods are goroutine-safe and independent of each other ([guideline](https://azure.github.io/azure-sdk/golang_introduction.html#thread-safety)). This ensures that the recommendation of reusing client instances is always safe, even across goroutines.
### About blob metadata
Blob metadata name/value pairs are valid HTTP headers and should adhere to all restrictions governing HTTP headers. Metadata names must be valid HTTP header names, may contain only ASCII characters, and should be treated as case-insensitive. Base64-encode or URL-encode metadata values containing non-ASCII characters.
### Additional concepts
<!-- CLIENT COMMON BAR -->
[Client options](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy#ClientOptions) |
[Accessing the response](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime#WithCaptureResponse) |
[Handling failures](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError) |
[Logging](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore/log)
<!-- CLIENT COMMON BAR -->
## Examples
### Uploading a blob
```go
const (
account = "https://MYSTORAGEACCOUNT.blob.core.windows.net/"
containerName = "sample-container"
blobName = "sample-blob"
sampleFile = "path/to/sample/file"
)
// authenticate with Azure Active Directory
cred, err := azidentity.NewDefaultAzureCredential(nil)
// TODO: handle error
// create a client for the specified storage account
client, err := azblob.NewClient(account, cred, nil)
// TODO: handle error
// open the file for reading
file, err := os.OpenFile(sampleFile, os.O_RDONLY, 0)
// TODO: handle error
defer file.Close()
// upload the file to the specified container with the specified blob name
_, err = client.UploadFile(context.TODO(), containerName, blobName, file, nil)
// TODO: handle error
```
### Downloading a blob
```go
// this example accesses a public blob via anonymous access, so no credentials are required
client, err := azblob.NewClientWithNoCredential("https://azurestoragesamples.blob.core.windows.net/", nil)
// TODO: handle error
// create or open a local file where we can download the blob
file, err := os.Create("cloud.jpg")
// TODO: handle error
defer file.Close()
// download the blob
_, err = client.DownloadFile(context.TODO(), "samples", "cloud.jpg", file, nil)
// TODO: handle error
```
### Enumerating blobs
```go
const (
account = "https://MYSTORAGEACCOUNT.blob.core.windows.net/"
containerName = "sample-container"
)
// authenticate with Azure Active Directory
cred, err := azidentity.NewDefaultAzureCredential(nil)
// TODO: handle error
// create a client for the specified storage account
client, err := azblob.NewClient(account, cred, nil)
// TODO: handle error
// blob listings are returned across multiple pages
pager := client.NewListBlobsFlatPager(containerName, nil)
// continue fetching pages until no more remain
for pager.More() {
// advance to the next page
page, err := pager.NextPage(context.TODO())
// TODO: handle error
// print the blob names for this page
for _, blob := range page.Segment.BlobItems {
fmt.Println(*blob.Name)
}
} }
accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY")
if !ok {
handle(errors.New("AZURE_STORAGE_ACCOUNT_KEY could not be found"))
}
cred, err := NewSharedKeyCredential(accountName, accountKey)
handle(err)
// Open up a service client.
// You'll need to specify a service URL, which for blob endpoints usually makes up the syntax http(s)://<account>.blob.core.windows.net/
service, err := NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil)
handle(err)
// All operations in the Azure Blob Storage SDK for Go operate on a context.Context, allowing you to control cancellation/timeout.
ctx := context.Background() // This example has no expiry.
// This example showcases several common operations to help you get started, such as:
// ===== 1. Creating a container =====
// First, branch off of the service client and create a container client.
container := service.NewContainerClient("mycontainer")
// Then, fire off a create operation on the container client.
// Note that, all service-side requests have an options bag attached, allowing you to specify things like metadata, public access types, etc.
// Specifying nil omits all options.
_, err = container.Create(ctx, nil)
handle(err)
// ===== 2. Uploading/downloading a block blob =====
// We'll specify our data up-front, rather than reading a file for simplicity's sake.
data := "Hello world!"
// Branch off of the container into a block blob client
blockBlob := container.NewBlockBlobClient("HelloWorld.txt")
// Upload data to the block blob
_, err = blockBlob.Upload(ctx, NopCloser(strings.NewReader(data)), nil)
handle(err)
// Download the blob's contents and ensure that the download worked properly
get, err := blockBlob.Download(ctx, nil)
handle(err)
// Open a buffer, reader, and then download!
downloadedData := &bytes.Buffer{}
// RetryReaderOptions has a lot of in-depth tuning abilities, but for the sake of simplicity, we'll omit those here.
reader := get.Body(RetryReaderOptions{})
_, err = downloadedData.ReadFrom(reader)
handle(err)
err = reader.Close()
handle(err)
if data != downloadedData.String() {
handle(errors.New("downloaded data doesn't match uploaded data"))
}
// ===== 3. list blobs =====
// The ListBlobs and ListContainers APIs return two channels, a values channel, and an errors channel.
// You should enumerate on a range over the values channel, and then check the errors channel, as only ONE value will ever be passed to the errors channel.
// The AutoPagerTimeout defines how long it will wait to place into the items channel before it exits & cleans itself up. A zero time will result in no timeout.
pager := container.ListBlobsFlat(nil)
for pager.NextPage(ctx) {
resp := pager.PageResponse()
for _, v := range resp.ContainerListBlobFlatSegmentResult.Segment.BlobItems {
fmt.Println(*v.Name)
}
}
if err = pager.Err(); err != nil {
handle(err)
}
// Delete the blob we created earlier.
_, err = blockBlob.Delete(ctx, nil)
handle(err)
// Delete the container we created earlier.
_, err = container.Delete(ctx, nil)
handle(err)
``` ```
## Troubleshooting ## Troubleshooting
### Error Handling All Blob service operations will return an
[*azcore.ResponseError][azcore_response_error] on failure with a
populated `ErrorCode` field. Many of these errors are recoverable.
The [bloberror][blob_error] package provides the possible Storage error codes
along with various helper facilities for error handling.
All I/O operations will return an `error` that can be investigated to discover more information about the error. In ```go
addition, you can investigate the raw response of any response object: const (
connectionString = "<connection_string>"
containerName = "sample-container"
)
```golang // create a client with the provided connection string
var storageErr *azblob.StorageError client, err := azblob.NewClientFromConnectionString(connectionString, nil)
resp, err := serviceClient.CreateContainer(context.Background(), "testcontainername", nil) // TODO: handle error
if err != nil && errors.As(err, &storageErr) {
// do something with storageErr.Response() // try to delete the container, avoiding any potential race conditions with an in-progress or completed deletion
_, err = client.DeleteContainer(context.TODO(), containerName, nil)
if bloberror.HasCode(err, bloberror.ContainerBeingDeleted, bloberror.ContainerNotFound) {
// ignore any errors if the container is being deleted or already has been deleted
} else if err != nil {
// TODO: some other error
} }
``` ```
### Logging ## Next steps
This module uses the classification based logging implementation in azcore. To turn on logging Get started with our [Blob samples][samples]. They contain complete examples of the above snippets and more.
set `AZURE_SDK_GO_LOGGING` to `all`.
If you only want to include logs for `azblob`, you must create your own logger and set the log classification ### Specialized clients
as `LogCredential`.
To obtain more detailed logging, including request/response bodies and header values, make sure to leave the logger as The Azure Blob Storage SDK for Go also provides specialized clients in various subpackages.
default or enable the `LogRequest` and/or `LogResponse` classifications. A logger that only includes credential logs can
be like the following: Learn more about the various types of blobs from the following links.
```golang - [appendblob][append_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-append-blobs)
import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" - [blockblob][block_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-block-blobs)
// Set log to output to the console - [pageblob][page_blob] - [REST docs](https://docs.microsoft.com/rest/api/storageservices/understanding-block-blobs--append-blobs--and-page-blobs#about-page-blobs)
azlog.SetListener(func (cls azlog.Classification, msg string) {
fmt.Println(msg) // printing log out to the console
})
// Includes only requests and responses in credential logs The [blob][blob] package contains APIs common to all blob types. This includes APIs for deleting and undeleting a blob, setting metadata, and more.
azlog.SetClassifications(azlog.Request, azlog.Response)
```
> CAUTION: logs from credentials contain sensitive information. The [lease][lease] package contains clients for managing leases on blobs and containers. Please see the [reference docs](https://docs.microsoft.com/rest/api/storageservices/lease-blob#remarks) for general information on leases.
> These logs must be protected to avoid compromising account security.
>
## License The [container][container] package contains APIs specific to containers. This includes APIs setting access policies or properties, and more.
This project is licensed under MIT. The [service][service] package contains APIs specific to blob service. This includes APIs for manipulating containers, retrieving account information, and more.
## Provide Feedback The [sas][sas] package contains utilities to aid in the creation and manipulation of Shared Access Signature tokens.
See the package's documentation for more information.
If you encounter bugs or have suggestions, please
[open an issue](https://github.com/Azure/azure-sdk-for-go/issues) and assign the `Azure.AzBlob` label.
## Contributing ## Contributing
This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License See the [Storage CONTRIBUTING.md][storage_contrib] for details on building,
Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For testing, and contributing to this library.
details, visit https://cla.microsoft.com.
When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate This project welcomes contributions and suggestions. Most contributions require
the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to you to agree to a Contributor License Agreement (CLA) declaring that you have
do this once across all repos using our CLA. the right to, and actually do, grant us the rights to use your contribution. For
details, visit [cla.microsoft.com][cla].
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). This project has adopted the [Microsoft Open Source Code of Conduct][coc].
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or For more information see the [Code of Conduct FAQ][coc_faq]
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. or contact [opencode@microsoft.com][coc_contact] with any
additional questions or comments.
![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-go%2Fsdk%2Fstorage%2Fazblob%2FREADME.png)
<!-- LINKS --> <!-- LINKS -->
[source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob
[azure_subscription]:https://azure.microsoft.com/free/ [docs]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob
[rest_docs]: https://docs.microsoft.com/rest/api/storageservices/blob-service-rest-api
[azure_storage_account]:https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-portal [product_docs]: https://docs.microsoft.com/azure/storage/blobs/storage-blobs-overview
[godevdl]: https://go.dev/dl/
[azure_portal_create_account]:https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-portal [goget]: https://pkg.go.dev/cmd/go#hdr-Add_dependencies_to_current_module_and_install_them
[storage_account_docs]: https://docs.microsoft.com/azure/storage/common/storage-account-overview
[azure_powershell_create_account]:https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-powershell [storage_account_create_ps]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-powershell
[storage_account_create_cli]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-cli
[azure_cli_create_account]: https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-cli [storage_account_create_portal]: https://docs.microsoft.com/azure/storage/common/storage-quickstart-create-account?tabs=azure-portal
[azure_cli]: https://docs.microsoft.com/cli/azure
[azure_cli_account_url]:https://docs.microsoft.com/cli/azure/storage/account?view=azure-cli-latest#az-storage-account-show [azure_sub]: https://azure.microsoft.com/free/
[azidentity]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity
[azure_powershell_account_url]:https://docs.microsoft.com/powershell/module/az.storage/get-azstorageaccount?view=azps-4.6.1 [storage_ad]: https://docs.microsoft.com/azure/storage/common/storage-auth-aad
[azcore_response_error]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore#ResponseError
[azure_portal_account_url]:https://docs.microsoft.com/azure/storage/common/storage-account-overview#storage-account-endpoints [samples]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/storage/azblob/examples_test.go
[append_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/appendblob/client.go
[azure_sas_token]:https://docs.microsoft.com/azure/storage/common/storage-sas-overview [blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/blob/client.go
[blob_error]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/bloberror/error_codes.go
[azure_shared_key]:https://docs.microsoft.com/rest/api/storageservices/authorize-with-shared-key [block_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/blockblob/client.go
[container]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/container/client.go
[azure_core_ref_docs]:https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore [lease]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/lease
[page_blob]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/pageblob/client.go
[azure_core_readme]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azcore/README.md [sas]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/sas
[service]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob/service/client.go
[blobs_error_codes]: https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes [storage_contrib]: https://github.com/Azure/azure-sdk-for-go/blob/main/CONTRIBUTING.md
[cla]: https://cla.microsoft.com
[msft_oss_coc]:https://opensource.microsoft.com/codeofconduct/ [coc]: https://opensource.microsoft.com/codeofconduct/
[coc_faq]: https://opensource.microsoft.com/codeofconduct/faq/
[msft_oss_coc_faq]:https://opensource.microsoft.com/codeofconduct/faq/ [coc_contact]: mailto:opencode@microsoft.com
[contact_msft_oss]:mailto:opencode@microsoft.com
[blobs_rest]: https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-rest-api

View file

@ -0,0 +1,263 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package appendblob
import (
"context"
"io"
"os"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
)
// ClientOptions contains the optional parameters when creating a Client.
// It embeds azcore.ClientOptions, so all transport, retry, telemetry, and
// logging settings from azcore apply here unchanged.
type ClientOptions struct {
azcore.ClientOptions
}
// Client represents a client to an Azure Storage append blob.
// It is a composite of the generated blob client (common blob operations)
// and the generated append-blob client (append-specific operations).
type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClient]
// NewClient creates an AppendBlobClient with the specified URL, Azure AD credential, and options.
// The credential is wrapped in a bearer-token policy scoped to Azure Storage.
func NewClient(blobURL string, cred azcore.TokenCredential, o *ClientOptions) (*Client, error) {
	opts := shared.GetClientOptions(o)
	// Attach AAD bearer-token authentication as a per-retry policy.
	opts.PerRetryPolicies = append(
		opts.PerRetryPolicies,
		runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil),
	)
	pipeline := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &opts.ClientOptions)
	// No shared key is associated with a token-credential client.
	return (*Client)(base.NewAppendBlobClient(blobURL, pipeline, nil)), nil
}
// NewClientWithNoCredential creates an AppendBlobClient with the specified URL and options.
// Use this for anonymous access or when the URL already carries a SAS token.
func NewClientWithNoCredential(blobURL string, o *ClientOptions) (*Client, error) {
	opts := shared.GetClientOptions(o)
	pipeline := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &opts.ClientOptions)
	return (*Client)(base.NewAppendBlobClient(blobURL, pipeline, nil)), nil
}
// NewClientWithSharedKeyCredential creates an AppendBlobClient with the specified URL, shared key, and options.
// The shared key is retained on the composite client so derived clients (snapshots, versions) can reuse it.
func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, o *ClientOptions) (*Client, error) {
	opts := shared.GetClientOptions(o)
	// Sign every (re)try of a request with the shared key.
	opts.PerRetryPolicies = append(opts.PerRetryPolicies, exported.NewSharedKeyCredPolicy(cred))
	pipeline := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &opts.ClientOptions)
	return (*Client)(base.NewAppendBlobClient(blobURL, pipeline, cred)), nil
}
// NewClientFromConnectionString creates a Client from a connection string.
// The container and blob names are appended to the service URL parsed from the
// connection string. When the string contains an account name and key, a
// shared-key client is returned; otherwise an unauthenticated client is built.
func NewClientFromConnectionString(connectionString, containerName, blobName string, o *ClientOptions) (*Client, error) {
	parsed, err := shared.ParseConnectionString(connectionString)
	if err != nil {
		return nil, err
	}
	parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName)
	// Without both account name and key there is nothing to sign with.
	if parsed.AccountName == "" || parsed.AccountKey == "" {
		return NewClientWithNoCredential(parsed.ServiceURL, o)
	}
	cred, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey)
	if err != nil {
		return nil, err
	}
	return NewClientWithSharedKeyCredential(parsed.ServiceURL, cred, o)
}
// BlobClient returns the embedded blob client for this AppendBlob client.
// Operations common to all blob types (Delete, GetProperties, ...) delegate through it.
func (ab *Client) BlobClient() *blob.Client {
innerBlob, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.AppendBlobClient])(ab))
return (*blob.Client)(innerBlob)
}
// sharedKey returns the shared-key credential stored on the composite client,
// or nil when the client was built without one (token or anonymous auth).
func (ab *Client) sharedKey() *blob.SharedKeyCredential {
return base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.AppendBlobClient])(ab))
}
// generated returns the inner generated append-blob client used for
// append-specific service operations (Create, AppendBlock, Seal, ...).
func (ab *Client) generated() *generated.AppendBlobClient {
_, appendBlob := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.AppendBlobClient])(ab))
return appendBlob
}
// URL returns the URL endpoint used by the Client object.
func (ab *Client) URL() string {
return ab.generated().Endpoint()
}
// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot, returning a URL to the base blob.
func (ab *Client) WithSnapshot(snapshot string) (*Client, error) {
	parsed, err := blob.ParseURL(ab.URL())
	if err != nil {
		return nil, err
	}
	parsed.Snapshot = snapshot
	// Reuse the existing pipeline and shared key so the derived client authenticates identically.
	return (*Client)(base.NewAppendBlobClient(parsed.String(), ab.generated().Pipeline(), ab.sharedKey())), nil
}
// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id.
// Pass "" to remove the versionID, returning a URL to the base blob.
func (ab *Client) WithVersionID(versionID string) (*Client, error) {
	parsed, err := blob.ParseURL(ab.URL())
	if err != nil {
		return nil, err
	}
	parsed.VersionID = versionID
	// Reuse the existing pipeline and shared key so the derived client authenticates identically.
	return (*Client)(base.NewAppendBlobClient(parsed.String(), ab.generated().Pipeline(), ab.sharedKey())), nil
}
// Create creates a 0-size append blob. Call AppendBlock to append data to an append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (ab *Client) Create(ctx context.Context, o *CreateOptions) (CreateResponse, error) {
	// format handles a nil options value; the content length of a Put Blob for
	// an append blob is always 0.
	createOpts, headers, lease, cpk, cpkScope, modified := o.format()
	return ab.generated().Create(ctx, 0, createOpts, headers, lease, cpk, cpkScope, modified)
}
// AppendBlock writes a stream to a new block of data to the end of the existing append blob.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
func (ab *Client) AppendBlock(ctx context.Context, body io.ReadSeekCloser, o *AppendBlockOptions) (AppendBlockResponse, error) {
	count, err := shared.ValidateSeekableStreamAt0AndGetCount(body)
	if err != nil {
		// BUG FIX: previously returned a nil error here, silently swallowing the
		// validation failure and handing callers an empty response as if it succeeded.
		return AppendBlockResponse{}, err
	}
	appendOptions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions := o.format()
	resp, err := ab.generated().AppendBlock(ctx, count, body, appendOptions, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions)
	return resp, err
}
// AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url.
func (ab *Client) AppendBlockFromURL(ctx context.Context, source string, o *AppendBlockFromURLOptions) (AppendBlockFromURLResponse, error) {
	urlOpts, cpk, cpkScope, lease, appendPos, modified, sourceModified := o.format()
	// The service requires Content-Length to be 0 for all *-from-URL operations;
	// anything else is rejected with a 400.
	return ab.generated().AppendBlockFromURL(ctx, source, 0, urlOpts, cpk, cpkScope,
		lease, appendPos, modified, sourceModified)
}
// Seal marks an append blob as read only, preventing further AppendBlock calls.
// See https://docs.microsoft.com/en-us/rest/api/storageservices/append-blob-seal.
func (ab *Client) Seal(ctx context.Context, o *SealOptions) (SealResponse, error) {
	lease, modified, appendPos := o.format()
	return ab.generated().Seal(ctx, nil, lease, modified, appendPos)
}
// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
// Note that deleting a blob also deletes all its snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
// Delegates to the embedded blob.Client.
func (ab *Client) Delete(ctx context.Context, o *blob.DeleteOptions) (blob.DeleteResponse, error) {
return ab.BlobClient().Delete(ctx, o)
}
// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob.
// Delegates to the embedded blob.Client.
func (ab *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.UndeleteResponse, error) {
return ab.BlobClient().Undelete(ctx, o)
}
// SetTier operation sets the tier on a blob. The operation is allowed on a page
// blob in a premium storage account and on a block blob in a blob storage account (locally
// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and
// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
// does not update the blob's ETag.
// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
// Delegates to the embedded blob.Client.
func (ab *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) {
return ab.BlobClient().SetTier(ctx, tier, o)
}
// GetProperties returns the blob's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
// Delegates to the embedded blob.Client.
func (ab *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) {
return ab.BlobClient().GetProperties(ctx, o)
}
// SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
// Delegates to the embedded blob.Client.
func (ab *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) {
return ab.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o)
}
// SetMetadata changes a blob's metadata, replacing any existing metadata.
// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
// Delegates to the embedded blob.Client.
func (ab *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) {
return ab.BlobClient().SetMetadata(ctx, metadata, o)
}
// CreateSnapshot creates a read-only snapshot of a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob.
// Delegates to the embedded blob.Client.
func (ab *Client) CreateSnapshot(ctx context.Context, o *blob.CreateSnapshotOptions) (blob.CreateSnapshotResponse, error) {
return ab.BlobClient().CreateSnapshot(ctx, o)
}
// StartCopyFromURL begins an asynchronous copy of the data at the source URL to a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
// Delegates to the embedded blob.Client.
func (ab *Client) StartCopyFromURL(ctx context.Context, copySource string, o *blob.StartCopyFromURLOptions) (blob.StartCopyFromURLResponse, error) {
return ab.BlobClient().StartCopyFromURL(ctx, copySource, o)
}
// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob.
// Delegates to the embedded blob.Client.
func (ab *Client) AbortCopyFromURL(ctx context.Context, copyID string, o *blob.AbortCopyFromURLOptions) (blob.AbortCopyFromURLResponse, error) {
return ab.BlobClient().AbortCopyFromURL(ctx, copyID, o)
}
// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot.
// Each call to this operation replaces all existing tags attached to the blob.
// To remove all tags from the blob, call this operation with no tags set.
// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags
// Delegates to the embedded blob.Client.
func (ab *Client) SetTags(ctx context.Context, tags map[string]string, o *blob.SetTagsOptions) (blob.SetTagsResponse, error) {
return ab.BlobClient().SetTags(ctx, tags, o)
}
// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot.
// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags
// Delegates to the embedded blob.Client.
func (ab *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.GetTagsResponse, error) {
return ab.BlobClient().GetTags(ctx, o)
}
// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url.
// Delegates to the embedded blob.Client.
func (ab *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) {
return ab.BlobClient().CopyFromURL(ctx, copySource, o)
}
// Concurrent Download Functions -----------------------------------------------------------------------------------------
// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
// Delegates to the embedded blob.Client.
func (ab *Client) DownloadStream(ctx context.Context, o *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) {
return ab.BlobClient().DownloadStream(ctx, o)
}
// DownloadBuffer downloads an Azure blob into the supplied buffer.
// The buffer is wrapped in a shared.NewBytesWriter before delegating.
// NOTE(review): presumably the embedded blob.Client's DownloadBuffer accepts that
// writer type and performs the download concurrently — confirm against blob/client.go.
func (ab *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *blob.DownloadBufferOptions) (int64, error) {
return ab.BlobClient().DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o)
}
// DownloadFile downloads an Azure blob to a local file.
// The file would be truncated if the size doesn't match.
// Delegates to the embedded blob.Client.
func (ab *Client) DownloadFile(ctx context.Context, file *os.File, o *blob.DownloadFileOptions) (int64, error) {
return ab.BlobClient().DownloadFile(ctx, file, o)
}

View file

@ -0,0 +1,166 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package appendblob
import (
"time"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
)
// Type Declarations ---------------------------------------------------------------------
// AppendPositionAccessConditions contains a group of parameters for the Client.AppendBlock method.
// It is an alias of the generated type so callers never import the internal generated package.
type AppendPositionAccessConditions = generated.AppendPositionAccessConditions
// Request Model Declaration -------------------------------------------------------------------------------------------
// CreateOptions provides the set of configurations for the Create Append Blob operation.
type CreateOptions struct {
// Specifies the date time when the blobs immutability policy is set to expire.
ImmutabilityPolicyExpiry *time.Time
// Specifies the immutability policy mode to set on the blob.
ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting
// Specified if a legal hold should be set on the blob.
LegalHold *bool
// Optional conditions (lease, ETag, modified-since) that must hold for the request to succeed.
AccessConditions *blob.AccessConditions
// Optional standard HTTP headers (Content-Type, Cache-Control, ...) to set on the blob.
HTTPHeaders *blob.HTTPHeaders
// Optional customer-provided key information for encrypting the blob contents.
CpkInfo *blob.CpkInfo
// Optional customer-provided encryption scope.
CpkScopeInfo *blob.CpkScopeInfo
// Optional. Used to set blob tags in various blob operations.
Tags map[string]string
// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
// operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs
// are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source
// blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers.
// See Naming and Referencing Containers, Blobs, and Metadata for more information.
Metadata map[string]string
}
// format converts CreateOptions into the positional parameters expected by the
// generated AppendBlobClient.Create. A nil receiver yields all-nil values.
// The return order must match the generated method's signature exactly.
func (o *CreateOptions) format() (*generated.AppendBlobClientCreateOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) {
if o == nil {
return nil, nil, nil, nil, nil, nil
}
options := generated.AppendBlobClientCreateOptions{
BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags),
Metadata: o.Metadata,
ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry,
ImmutabilityPolicyMode: o.ImmutabilityPolicyMode,
LegalHold: o.LegalHold,
}
// Split the combined access conditions into lease and modified-access parts.
leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
return &options, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions
}
// ---------------------------------------------------------------------------------------------------------------------
// AppendBlockOptions contains the optional parameters for the Client.AppendBlock method.
type AppendBlockOptions struct {
// Specify the transactional crc64 for the body, to be validated by the service.
TransactionalContentCRC64 []byte
// Specify the transactional md5 for the body, to be validated by the service.
TransactionalContentMD5 []byte
// Optional conditions on the append position / max blob size for the block to be committed.
AppendPositionAccessConditions *AppendPositionAccessConditions
// Optional customer-provided key information for encrypting the block contents.
CpkInfo *blob.CpkInfo
// Optional customer-provided encryption scope.
CpkScopeInfo *blob.CpkScopeInfo
// Optional conditions (lease, ETag, modified-since) that must hold for the request to succeed.
AccessConditions *blob.AccessConditions
}
// format converts AppendBlockOptions into the positional parameters expected by the
// generated AppendBlobClient.AppendBlock. A nil receiver yields all-nil values.
// The return order must match how Client.AppendBlock destructures the results.
func (o *AppendBlockOptions) format() (*generated.AppendBlobClientAppendBlockOptions, *generated.AppendPositionAccessConditions,
*generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) {
if o == nil {
return nil, nil, nil, nil, nil, nil
}
options := &generated.AppendBlobClientAppendBlockOptions{
TransactionalContentCRC64: o.TransactionalContentCRC64,
TransactionalContentMD5: o.TransactionalContentMD5,
}
// Split the combined access conditions into lease and modified-access parts.
leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
return options, o.AppendPositionAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions, leaseAccessConditions
}
// ---------------------------------------------------------------------------------------------------------------------
// AppendBlockFromURLOptions contains the optional parameters for the Client.AppendBlockFromURL method.
type AppendBlockFromURLOptions struct {
// Specify the md5 calculated for the range of bytes that must be read from the copy source.
SourceContentMD5 []byte
// Specify the crc64 calculated for the range of bytes that must be read from the copy source.
SourceContentCRC64 []byte
// Specify the transactional md5 for the body, to be validated by the service.
TransactionalContentMD5 []byte
// Optional conditions on the append position / max blob size for the block to be committed.
AppendPositionAccessConditions *AppendPositionAccessConditions
// Optional customer-provided key information for encrypting the block contents.
CpkInfo *blob.CpkInfo
// Optional customer-provided encryption scope.
CpkScopeInfo *blob.CpkScopeInfo
// Optional conditions the SOURCE blob must satisfy (ETag, modified-since) for the copy to proceed.
SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions
// Optional conditions the destination blob must satisfy for the request to succeed.
AccessConditions *blob.AccessConditions
// Range specifies a range of bytes. The default value is all bytes.
Range blob.HTTPRange
}
// format converts AppendBlockFromURLOptions into the positional parameters expected by the
// generated AppendBlobClient.AppendBlockFromURL. A nil receiver yields all-nil values.
// The return order must match how Client.AppendBlockFromURL destructures the results.
func (o *AppendBlockFromURLOptions) format() (*generated.AppendBlobClientAppendBlockFromURLOptions, *generated.CpkInfo,
*generated.CpkScopeInfo, *generated.LeaseAccessConditions, *generated.AppendPositionAccessConditions,
*generated.ModifiedAccessConditions, *generated.SourceModifiedAccessConditions) {
if o == nil {
return nil, nil, nil, nil, nil, nil, nil
}
options := &generated.AppendBlobClientAppendBlockFromURLOptions{
SourceRange: exported.FormatHTTPRange(o.Range),
SourceContentMD5: o.SourceContentMD5,
SourceContentcrc64: o.SourceContentCRC64,
TransactionalContentMD5: o.TransactionalContentMD5,
}
// Split the combined access conditions into lease and modified-access parts.
leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
return options, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, o.AppendPositionAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions
}
// ---------------------------------------------------------------------------------------------------------------------
// SealOptions provides the set of configurations for the Seal Append Blob operation.
type SealOptions struct {
// Optional conditions (lease, ETag, modified-since) that must hold for the request to succeed.
AccessConditions *blob.AccessConditions
// Optional conditions on the append position / max blob size for the seal to be applied.
AppendPositionAccessConditions *AppendPositionAccessConditions
}
// format expands the options into the generated-layer access-condition
// parameters for Seal. A nil receiver yields all-nil results.
func (o *SealOptions) format() (*generated.LeaseAccessConditions,
	*generated.ModifiedAccessConditions, *generated.AppendPositionAccessConditions) {
	if o == nil {
		return nil, nil, nil
	}
	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	return lease, modified, o.AppendPositionAccessConditions
}
// ---------------------------------------------------------------------------------------------------------------------

View file

@ -0,0 +1,23 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package appendblob
import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
// The aliases below re-export the generated-layer response types under the
// public appendblob package namespace.

// CreateResponse contains the response from method Client.Create.
type CreateResponse = generated.AppendBlobClientCreateResponse
// AppendBlockResponse contains the response from method Client.AppendBlock.
type AppendBlockResponse = generated.AppendBlobClientAppendBlockResponse
// AppendBlockFromURLResponse contains the response from method Client.AppendBlockFromURL.
type AppendBlockFromURLResponse = generated.AppendBlobClientAppendBlockFromURLResponse
// SealResponse contains the response from method Client.Seal.
type SealResponse = generated.AppendBlobClientSealResponse

View file

@ -1,171 +0,0 @@
# Code Generation - Azure Blob SDK for Golang
<!-- autorest --use=@autorest/go@4.0.0-preview.35 https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/storage/data-plane/Microsoft.BlobStorage/preview/2020-10-02/blob.json --file-prefix="zz_generated_" --modelerfour.lenient-model-deduplication --license-header=MICROSOFT_MIT_NO_VERSION --output-folder=generated/ --module=azblob --openapi-type="data-plane" --credential-scope=none -->
```bash
cd swagger
autorest autorest.md
gofmt -w generated/*
```
### Settings
```yaml
go: true
clear-output-folder: false
version: "^3.0.0"
license-header: MICROSOFT_MIT_NO_VERSION
input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/storage/data-plane/Microsoft.BlobStorage/preview/2020-10-02/blob.json"
module: "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
credential-scope: "https://storage.azure.com/.default"
output-folder: internal/
file-prefix: "zz_generated_"
openapi-type: "data-plane"
verbose: true
security: AzureKey
module-version: "0.3.0"
modelerfour:
group-parameters: false
seal-single-value-enum-by-default: true
lenient-model-deduplication: true
export-clients: false
use: "@autorest/go@4.0.0-preview.36"
```
### Fix BlobMetadata.
``` yaml
directive:
- from: swagger-document
where: $.definitions
transform: >
delete $.BlobMetadata["properties"];
```
### Don't include container name or blob in path - we have direct URIs.
``` yaml
directive:
- from: swagger-document
where: $["x-ms-paths"]
transform: >
for (const property in $)
{
if (property.includes('/{containerName}/{blob}'))
{
$[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName") && false == param['$ref'].endsWith("#/parameters/Blob"))});
}
else if (property.includes('/{containerName}'))
{
$[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName"))});
}
}
```
### Remove DataLake stuff.
``` yaml
directive:
- from: swagger-document
where: $["x-ms-paths"]
transform: >
for (const property in $)
{
if (property.includes('filesystem'))
{
delete $[property];
}
}
```
### Remove DataLakeStorageError
``` yaml
directive:
- from: swagger-document
where: $.definitions
transform: >
delete $.DataLakeStorageError;
```
### Fix 304s
``` yaml
directive:
- from: swagger-document
where: $["x-ms-paths"]["/{containerName}/{blob}"]
transform: >
$.get.responses["304"] = {
"description": "The condition specified using HTTP conditional header(s) is not met.",
"x-az-response-name": "ConditionNotMetError",
"headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }
};
```
### Fix GeoReplication
``` yaml
directive:
- from: swagger-document
where: $.definitions
transform: >
delete $.GeoReplication.properties.Status["x-ms-enum"];
$.GeoReplication.properties.Status["x-ms-enum"] = {
"name": "BlobGeoReplicationStatus",
"modelAsString": false
};
```
### Fix RehydratePriority
``` yaml
directive:
- from: swagger-document
where: $.definitions
transform: >
delete $.RehydratePriority["x-ms-enum"];
$.RehydratePriority["x-ms-enum"] = {
"name": "RehydratePriority",
"modelAsString": false
};
```
### Fix BlobDeleteType
``` yaml
directive:
- from: swagger-document
where: $.parameters
transform: >
delete $.BlobDeleteType.enum;
$.BlobDeleteType.enum = [
"None",
"Permanent"
];
```
### Fix EncryptionAlgorithm
``` yaml
directive:
- from: swagger-document
where: $.parameters
transform: >
delete $.EncryptionAlgorithm.enum;
$.EncryptionAlgorithm.enum = [
"None",
"AES256"
];
```
### Fix XML string "ObjectReplicationMetadata" to "OrMetadata"
``` yaml
directive:
- from: swagger-document
where: $.definitions
transform: >
$.BlobItemInternal.properties["OrMetadata"] = $.BlobItemInternal.properties["ObjectReplicationMetadata"];
delete $.BlobItemInternal.properties["ObjectReplicationMetadata"];
```

View file

@ -0,0 +1,415 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blob
import (
"context"
"errors"
"io"
"os"
"strings"
"sync"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)
// ClientOptions contains the optional parameters when creating a Client.
type ClientOptions struct {
// Embedded azcore options (transport, retry, telemetry, per-call/per-retry policies).
azcore.ClientOptions
}
// Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
// It is a named type over base.Client so it can be converted to/from the internal client without allocation.
type Client base.Client[generated.BlobClient]
// NewClient creates a Client object using the specified URL, Azure AD credential, and options.
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
	opts := shared.GetClientOptions(options)
	// Authenticate each try of every request with a bearer token for the storage scope.
	opts.PerRetryPolicies = append(opts.PerRetryPolicies, runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil))
	pipeline := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &opts.ClientOptions)
	return (*Client)(base.NewBlobClient(blobURL, pipeline, nil)), nil
}
// NewClientWithNoCredential creates a Client object using the specified URL and options.
// The URL is expected to carry any required authorization (e.g. a SAS token) itself.
func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
	opts := shared.GetClientOptions(options)
	pipeline := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &opts.ClientOptions)
	return (*Client)(base.NewBlobClient(blobURL, pipeline, nil)), nil
}
// NewClientWithSharedKeyCredential creates a Client object using the specified URL, shared key, and options.
func NewClientWithSharedKeyCredential(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
	opts := shared.GetClientOptions(options)
	// Sign each try of every request with the account's shared key.
	opts.PerRetryPolicies = append(opts.PerRetryPolicies, exported.NewSharedKeyCredPolicy(cred))
	pipeline := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &opts.ClientOptions)
	// The credential is retained on the client so SAS tokens can be signed later (see GetSASURL).
	return (*Client)(base.NewBlobClient(blobURL, pipeline, cred)), nil
}
// NewClientFromConnectionString creates a Client from a connection string.
func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) {
	parsed, err := shared.ParseConnectionString(connectionString)
	if err != nil {
		return nil, err
	}
	parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName)
	// Without both an account name and key there is nothing to sign with;
	// fall back to anonymous access (any SAS lives in the URL itself).
	if parsed.AccountKey == "" || parsed.AccountName == "" {
		return NewClientWithNoCredential(parsed.ServiceURL, options)
	}
	cred, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey)
	if err != nil {
		return nil, err
	}
	return NewClientWithSharedKeyCredential(parsed.ServiceURL, cred, options)
}
// generated returns the underlying generated-layer BlobClient for this Client.
func (b *Client) generated() *generated.BlobClient {
return base.InnerClient((*base.Client[generated.BlobClient])(b))
}
// sharedKey returns the SharedKeyCredential the Client was created with, or nil
// when it was constructed with a different (or no) credential.
func (b *Client) sharedKey() *SharedKeyCredential {
return base.SharedKey((*base.Client[generated.BlobClient])(b))
}
// URL returns the URL endpoint used by the Client object.
// This is the full blob URL, including any snapshot/versionid query parameters.
func (b *Client) URL() string {
return b.generated().Endpoint()
}
// WithSnapshot creates a new Client object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (b *Client) WithSnapshot(snapshot string) (*Client, error) {
	parts, err := ParseURL(b.URL())
	if err != nil {
		return nil, err
	}
	parts.Snapshot = snapshot
	// Reuse the existing pipeline and credential; only the URL changes.
	return (*Client)(base.NewBlobClient(parts.String(), b.generated().Pipeline(), b.sharedKey())), nil
}
// WithVersionID creates a new Client object identical to the source but with the specified version id.
// Pass "" to remove the versionID returning a URL to the base blob.
func (b *Client) WithVersionID(versionID string) (*Client, error) {
p, err := ParseURL(b.URL())
if err != nil {
return nil, err
}
p.VersionID = versionID
// Reuse the existing pipeline and credential; only the URL changes.
return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.sharedKey())), nil
}
// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
// Note that deleting a blob also deletes all its snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
func (b *Client) Delete(ctx context.Context, o *DeleteOptions) (DeleteResponse, error) {
	opts, lease, access := o.format()
	return b.generated().Delete(ctx, opts, lease, access)
}
// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob.
func (b *Client) Undelete(ctx context.Context, o *UndeleteOptions) (UndeleteResponse, error) {
	return b.generated().Undelete(ctx, o.format())
}
// SetTier operation sets the tier on a blob. The operation is allowed on a page
// blob in a premium storage account and on a block blob in a blob storage account (locally
// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and
// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
// does not update the blob's ETag.
// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
func (b *Client) SetTier(ctx context.Context, tier AccessTier, o *SetTierOptions) (SetTierResponse, error) {
	opts, lease, modified := o.format()
	return b.generated().SetTier(ctx, tier, opts, lease, modified)
}
// GetProperties returns the blob's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
func (b *Client) GetProperties(ctx context.Context, options *GetPropertiesOptions) (GetPropertiesResponse, error) {
	opts, lease, cpk, modified := options.format()
	return b.generated().GetProperties(ctx, opts, lease, cpk, modified)
}
// SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (b *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders HTTPHeaders, o *SetHTTPHeadersOptions) (SetHTTPHeadersResponse, error) {
	opts, lease, modified := o.format()
	return b.generated().SetHTTPHeaders(ctx, opts, &HTTPHeaders, lease, modified)
}
// SetMetadata changes a blob's metadata.
// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
func (b *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *SetMetadataOptions) (SetMetadataResponse, error) {
	lease, cpk, cpkScope, modified := o.format()
	opts := generated.BlobClientSetMetadataOptions{Metadata: metadata}
	return b.generated().SetMetadata(ctx, &opts, lease, cpk, cpkScope, modified)
}
// CreateSnapshot creates a read-only snapshot of a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob.
//
// CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter
// because checking this would be a performance hit for a VERY unusual path, and we don't think the common case should suffer this
// performance hit.
func (b *Client) CreateSnapshot(ctx context.Context, options *CreateSnapshotOptions) (CreateSnapshotResponse, error) {
	opts, cpk, cpkScope, modified, lease := options.format()
	return b.generated().CreateSnapshot(ctx, opts, cpk, cpkScope, modified, lease)
}
// StartCopyFromURL copies the data at the source URL to a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
func (b *Client) StartCopyFromURL(ctx context.Context, copySource string, options *StartCopyFromURLOptions) (StartCopyFromURLResponse, error) {
	opts, srcModified, modified, lease := options.format()
	return b.generated().StartCopyFromURL(ctx, copySource, opts, srcModified, modified, lease)
}
// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob.
func (b *Client) AbortCopyFromURL(ctx context.Context, copyID string, options *AbortCopyFromURLOptions) (AbortCopyFromURLResponse, error) {
	opts, lease := options.format()
	return b.generated().AbortCopyFromURL(ctx, copyID, opts, lease)
}
// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot.
// Each call to this operation replaces all existing tags attached to the blob.
// To remove all tags from the blob, call this operation with no tags set.
// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags
func (b *Client) SetTags(ctx context.Context, tags map[string]string, options *SetTagsOptions) (SetTagsResponse, error) {
	opts, modified, lease := options.format()
	return b.generated().SetTags(ctx, *shared.SerializeBlobTags(tags), opts, modified, lease)
}
// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot.
// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags
func (b *Client) GetTags(ctx context.Context, options *GetTagsOptions) (GetTagsResponse, error) {
	opts, modified, lease := options.format()
	return b.generated().GetTags(ctx, opts, modified, lease)
}
// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url.
func (b *Client) CopyFromURL(ctx context.Context, copySource string, options *CopyFromURLOptions) (CopyFromURLResponse, error) {
	opts, srcModified, modified, lease := options.format()
	return b.generated().CopyFromURL(ctx, copySource, opts, srcModified, modified, lease)
}
// GetSASURL is a convenience method for generating a SAS token for the currently pointed at blob.
// It can only be used if the credential supplied during creation was a SharedKeyCredential.
// It returns the blob URL with the signed SAS query parameters appended.
func (b *Client) GetSASURL(permissions sas.BlobPermissions, start time.Time, expiry time.Time) (string, error) {
if b.sharedKey() == nil {
return "", errors.New("credential is not a SharedKeyCredential. SAS can only be signed with a SharedKeyCredential")
}
urlParts, err := ParseURL(b.URL())
if err != nil {
return "", err
}
// A URL without a snapshot yields a parse error here; the zero time is the
// deliberate fallback meaning "no snapshot" in the signature values below.
t, err := time.Parse(SnapshotTimeFormat, urlParts.Snapshot)
if err != nil {
t = time.Time{}
}
qps, err := sas.BlobSignatureValues{
ContainerName: urlParts.ContainerName,
BlobName: urlParts.BlobName,
SnapshotTime: t,
Version: sas.Version,
Permissions: permissions.String(),
StartTime: start.UTC(),
ExpiryTime: expiry.UTC(),
}.SignWithSharedKey(b.sharedKey())
if err != nil {
return "", err
}
endpoint := b.URL()
// NOTE(review): this appends "?"+SAS to the endpoint as-is; presumably the
// endpoint carries no query string at this point — confirm against callers.
if !strings.HasSuffix(endpoint, "/") {
endpoint += "/"
}
endpoint += "?" + qps.Encode()
return endpoint, nil
}
// Concurrent Download Functions -----------------------------------------------------------------------------------------
// download downloads an Azure blob to a WriterAt in parallel.
// It splits the requested range into BlockSize chunks, fetches them with up to
// o.Concurrency workers via shared.DoBatchTransfer, and writes each chunk at
// its offset in writer. Returns the number of bytes downloaded.
func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOptions) (int64, error) {
if o.BlockSize == 0 {
o.BlockSize = DefaultDownloadBlockSize
}
count := o.Range.Count
if count == CountToEnd { // If size not specified, calculate it
// If we don't have the length at all, get it
downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{}, nil)
dr, err := b.DownloadStream(ctx, downloadBlobOptions)
if err != nil {
return 0, err
}
count = *dr.ContentLength - o.Range.Offset
}
if count <= 0 {
// The file is empty, there is nothing to download.
return 0, nil
}
// Prepare and do parallel download.
progress := int64(0)
// progressLock serializes updates to the aggregate progress counter shared
// by all chunk workers.
progressLock := &sync.Mutex{}
err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{
OperationName: "downloadBlobToWriterAt",
TransferSize: count,
ChunkSize: o.BlockSize,
Concurrency: o.Concurrency,
Operation: func(chunkStart int64, count int64, ctx context.Context) error {
// Each worker downloads one [chunkStart, chunkStart+count) slice of the range.
downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{
Offset: chunkStart + o.Range.Offset,
Count: count,
}, nil)
dr, err := b.DownloadStream(ctx, downloadBlobOptions)
if err != nil {
return err
}
// Wrap the body so transient read failures are retried per block.
var body io.ReadCloser = dr.NewRetryReader(ctx, &o.RetryReaderOptionsPerBlock)
if o.Progress != nil {
rangeProgress := int64(0)
body = streaming.NewResponseProgress(
body,
func(bytesTransferred int64) {
// Convert this chunk's cumulative count into a delta for the shared total.
diff := bytesTransferred - rangeProgress
rangeProgress = bytesTransferred
progressLock.Lock()
progress += diff
o.Progress(progress)
progressLock.Unlock()
})
}
// SectionWriter confines writes to this chunk's offset window in writer.
_, err = io.Copy(shared.NewSectionWriter(writer, chunkStart, count), body)
if err != nil {
return err
}
err = body.Close()
return err
},
})
if err != nil {
return 0, err
}
return count, nil
}
// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
func (b *Client) DownloadStream(ctx context.Context, o *DownloadStreamOptions) (DownloadStreamResponse, error) {
	// Normalize a nil options pointer BEFORE formatting it, so the request
	// options and the fields read below (o.Range, o.CpkInfo, o.CpkScopeInfo)
	// are derived from the same value. The previous ordering called format()
	// on the possibly-nil pointer and only then substituted the zero value.
	if o == nil {
		o = &DownloadStreamOptions{}
	}
	downloadOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions := o.format()
	dr, err := b.generated().Download(ctx, downloadOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions)
	if err != nil {
		return DownloadStreamResponse{}, err
	}
	return DownloadStreamResponse{
		client:                     b,
		BlobClientDownloadResponse: dr,
		getInfo:                    httpGetterInfo{Range: o.Range, ETag: dr.ETag},
		ObjectReplicationRules:     deserializeORSPolicies(dr.ObjectReplicationRules),
		cpkInfo:                    o.CpkInfo,
		cpkScope:                   o.CpkScopeInfo,
	}, nil
}
// DownloadBuffer downloads an Azure blob into the provided buffer using
// parallel range requests.
func (b *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *DownloadBufferOptions) (int64, error) {
	opts := DownloadBufferOptions{}
	if o != nil {
		opts = *o
	}
	return b.download(ctx, shared.NewBytesWriter(buffer), downloadOptions(opts))
}
// DownloadFile downloads an Azure blob to a local file.
// The file would be truncated if the size doesn't match.
// It returns the number of bytes downloaded.
func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFileOptions) (int64, error) {
	if o == nil {
		o = &DownloadFileOptions{}
	}
	do := (*downloadOptions)(o)

	// 1. Calculate the size of the destination file: the requested count, or
	// (for CountToEnd) the blob's size minus the starting offset.
	var size int64
	count := do.Range.Count
	if count == CountToEnd {
		// Try to get Azure blob's size
		getBlobPropertiesOptions := do.getBlobPropertiesOptions()
		props, err := b.GetProperties(ctx, getBlobPropertiesOptions)
		if err != nil {
			return 0, err
		}
		size = *props.ContentLength - do.Range.Offset
	} else {
		size = count
	}

	// 2. Compare and try to resize local file's size if it doesn't match Azure
	// blob's size, so parallel chunk writes at arbitrary offsets land in an
	// allocated region.
	stat, err := file.Stat()
	if err != nil {
		return 0, err
	}
	if stat.Size() != size {
		if err = file.Truncate(size); err != nil {
			return 0, err
		}
	}

	// If the blob's size is 0, there is no need in downloading it.
	if size <= 0 {
		return 0, nil
	}
	return b.download(ctx, file, *do)
}

View file

@ -0,0 +1,241 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blob
import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
const (
// CountToEnd is the sentinel count meaning "from the offset to the end of the resource".
CountToEnd = 0
// SnapshotTimeFormat is the time layout used for blob snapshot timestamps.
SnapshotTimeFormat = exported.SnapshotTimeFormat
// DefaultDownloadBlockSize is the default block (chunk) size used by the
// parallel download helpers when the caller does not specify one.
DefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB
)
// BlobType defines values for BlobType. It is an alias of the generated type,
// re-exported for public use.
type BlobType = generated.BlobType
const (
BlobTypeBlockBlob BlobType = generated.BlobTypeBlockBlob
BlobTypePageBlob BlobType = generated.BlobTypePageBlob
BlobTypeAppendBlob BlobType = generated.BlobTypeAppendBlob
)
// PossibleBlobTypeValues returns the possible values for the BlobType const type.
func PossibleBlobTypeValues() []BlobType {
return generated.PossibleBlobTypeValues()
}
// DeleteSnapshotsOptionType defines values for DeleteSnapshotsOptionType.
type DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionType
const (
DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeInclude
DeleteSnapshotsOptionTypeOnly DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeOnly
)
// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type.
func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType {
return generated.PossibleDeleteSnapshotsOptionTypeValues()
}
// AccessTier defines values for Blob Access Tier.
type AccessTier = generated.AccessTier
const (
AccessTierArchive AccessTier = generated.AccessTierArchive
AccessTierCool AccessTier = generated.AccessTierCool
AccessTierHot AccessTier = generated.AccessTierHot
AccessTierP10 AccessTier = generated.AccessTierP10
AccessTierP15 AccessTier = generated.AccessTierP15
AccessTierP20 AccessTier = generated.AccessTierP20
AccessTierP30 AccessTier = generated.AccessTierP30
AccessTierP4 AccessTier = generated.AccessTierP4
AccessTierP40 AccessTier = generated.AccessTierP40
AccessTierP50 AccessTier = generated.AccessTierP50
AccessTierP6 AccessTier = generated.AccessTierP6
AccessTierP60 AccessTier = generated.AccessTierP60
AccessTierP70 AccessTier = generated.AccessTierP70
AccessTierP80 AccessTier = generated.AccessTierP80
AccessTierPremium AccessTier = generated.AccessTierPremium
)
// PossibleAccessTierValues returns the possible values for the AccessTier const type.
func PossibleAccessTierValues() []AccessTier {
return generated.PossibleAccessTierValues()
}
// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate.
// Valid values are High and Standard.
type RehydratePriority = generated.RehydratePriority
const (
RehydratePriorityHigh RehydratePriority = generated.RehydratePriorityHigh
RehydratePriorityStandard RehydratePriority = generated.RehydratePriorityStandard
)
// PossibleRehydratePriorityValues returns the possible values for the RehydratePriority const type.
func PossibleRehydratePriorityValues() []RehydratePriority {
return generated.PossibleRehydratePriorityValues()
}
// ImmutabilityPolicyMode defines values for ImmutabilityPolicyMode.
type ImmutabilityPolicyMode = generated.ImmutabilityPolicyMode
const (
ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeMutable
ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeUnlocked
ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeLocked
)
// PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type.
func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode {
return generated.PossibleImmutabilityPolicyModeValues()
}
// ImmutabilityPolicySetting defines values for ImmutabilityPolicySetting.
type ImmutabilityPolicySetting = generated.ImmutabilityPolicySetting
const (
ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = generated.ImmutabilityPolicySettingUnlocked
ImmutabilityPolicySettingLocked ImmutabilityPolicySetting = generated.ImmutabilityPolicySettingLocked
)
// PossibleImmutabilityPolicySettingValues returns the possible values for the ImmutabilityPolicySetting const type.
func PossibleImmutabilityPolicySettingValues() []ImmutabilityPolicySetting {
return generated.PossibleImmutabilityPolicySettingValues()
}
// CopyStatusType defines values for CopyStatusType.
type CopyStatusType = generated.CopyStatusType
const (
CopyStatusTypePending CopyStatusType = generated.CopyStatusTypePending
CopyStatusTypeSuccess CopyStatusType = generated.CopyStatusTypeSuccess
CopyStatusTypeAborted CopyStatusType = generated.CopyStatusTypeAborted
CopyStatusTypeFailed CopyStatusType = generated.CopyStatusTypeFailed
)
// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type.
func PossibleCopyStatusTypeValues() []CopyStatusType {
return generated.PossibleCopyStatusTypeValues()
}
// EncryptionAlgorithmType defines values for EncryptionAlgorithmType.
type EncryptionAlgorithmType = generated.EncryptionAlgorithmType
const (
EncryptionAlgorithmTypeNone EncryptionAlgorithmType = generated.EncryptionAlgorithmTypeNone
EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = generated.EncryptionAlgorithmTypeAES256
)
// PossibleEncryptionAlgorithmTypeValues returns the possible values for the EncryptionAlgorithmType const type.
func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType {
return generated.PossibleEncryptionAlgorithmTypeValues()
}
// ArchiveStatus defines values for ArchiveStatus.
type ArchiveStatus = generated.ArchiveStatus
const (
ArchiveStatusRehydratePendingToCool ArchiveStatus = generated.ArchiveStatusRehydratePendingToCool
ArchiveStatusRehydratePendingToHot ArchiveStatus = generated.ArchiveStatusRehydratePendingToHot
)
// PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type.
func PossibleArchiveStatusValues() []ArchiveStatus {
return generated.PossibleArchiveStatusValues()
}
// DeleteType defines values for DeleteType.
type DeleteType = generated.DeleteType
const (
DeleteTypeNone DeleteType = generated.DeleteTypeNone
DeleteTypePermanent DeleteType = generated.DeleteTypePermanent
)
// PossibleDeleteTypeValues returns the possible values for the DeleteType const type.
func PossibleDeleteTypeValues() []DeleteType {
return generated.PossibleDeleteTypeValues()
}
// ExpiryOptions defines values for ExpiryOptions.
type ExpiryOptions = generated.ExpiryOptions
const (
ExpiryOptionsAbsolute ExpiryOptions = generated.ExpiryOptionsAbsolute
ExpiryOptionsNeverExpire ExpiryOptions = generated.ExpiryOptionsNeverExpire
ExpiryOptionsRelativeToCreation ExpiryOptions = generated.ExpiryOptionsRelativeToCreation
ExpiryOptionsRelativeToNow ExpiryOptions = generated.ExpiryOptionsRelativeToNow
)
// PossibleExpiryOptionsValues returns the possible values for the ExpiryOptions const type.
func PossibleExpiryOptionsValues() []ExpiryOptions {
return generated.PossibleExpiryOptionsValues()
}
// QueryFormatType - The quick query format type.
type QueryFormatType = generated.QueryFormatType
const (
QueryFormatTypeDelimited QueryFormatType = generated.QueryFormatTypeDelimited
QueryFormatTypeJSON QueryFormatType = generated.QueryFormatTypeJSON
QueryFormatTypeArrow QueryFormatType = generated.QueryFormatTypeArrow
QueryFormatTypeParquet QueryFormatType = generated.QueryFormatTypeParquet
)
// PossibleQueryFormatTypeValues returns the possible values for the QueryFormatType const type.
func PossibleQueryFormatTypeValues() []QueryFormatType {
return generated.PossibleQueryFormatTypeValues()
}
// LeaseDurationType defines values for LeaseDurationType.
type LeaseDurationType = generated.LeaseDurationType
const (
LeaseDurationTypeInfinite LeaseDurationType = generated.LeaseDurationTypeInfinite
LeaseDurationTypeFixed LeaseDurationType = generated.LeaseDurationTypeFixed
)
// PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type.
func PossibleLeaseDurationTypeValues() []LeaseDurationType {
return generated.PossibleLeaseDurationTypeValues()
}
// LeaseStateType defines values for LeaseStateType.
type LeaseStateType = generated.LeaseStateType
const (
LeaseStateTypeAvailable LeaseStateType = generated.LeaseStateTypeAvailable
LeaseStateTypeLeased LeaseStateType = generated.LeaseStateTypeLeased
LeaseStateTypeExpired LeaseStateType = generated.LeaseStateTypeExpired
LeaseStateTypeBreaking LeaseStateType = generated.LeaseStateTypeBreaking
LeaseStateTypeBroken LeaseStateType = generated.LeaseStateTypeBroken
)
// PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type.
func PossibleLeaseStateTypeValues() []LeaseStateType {
return generated.PossibleLeaseStateTypeValues()
}
// LeaseStatusType defines values for LeaseStatusType.
type LeaseStatusType = generated.LeaseStatusType
const (
LeaseStatusTypeLocked LeaseStatusType = generated.LeaseStatusTypeLocked
LeaseStatusTypeUnlocked LeaseStatusType = generated.LeaseStatusTypeUnlocked
)
// PossibleLeaseStatusTypeValues returns the possible values for the LeaseStatusType const type.
func PossibleLeaseStatusTypeValues() []LeaseStatusType {
return generated.PossibleLeaseStatusTypeValues()
}

View file

@ -0,0 +1,492 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blob
import (
"time"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
)
// SharedKeyCredential contains an account's name and its primary or secondary key.
type SharedKeyCredential = exported.SharedKeyCredential

// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
// storage account's name and either its primary or secondary key.
// It forwards directly to the internal exported implementation.
func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) {
	return exported.NewSharedKeyCredential(accountName, accountKey)
}
// Type Declarations ---------------------------------------------------------------------

// AccessConditions identifies blob-specific access conditions which you optionally set.
type AccessConditions = exported.BlobAccessConditions

// LeaseAccessConditions contains optional parameters to access leased entity.
type LeaseAccessConditions = exported.LeaseAccessConditions

// ModifiedAccessConditions contains a group of parameters for specifying access conditions.
type ModifiedAccessConditions = exported.ModifiedAccessConditions

// CpkInfo contains a group of parameters for client provided encryption key.
type CpkInfo = generated.CpkInfo

// CpkScopeInfo contains a group of parameters for client provided encryption scope.
type CpkScopeInfo = generated.CpkScopeInfo

// HTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method.
type HTTPHeaders = generated.BlobHTTPHeaders

// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method.
type SourceModifiedAccessConditions = generated.SourceModifiedAccessConditions

// Tags represents a blob index tag; it aliases the generated BlobTag type.
type Tags = generated.BlobTag

// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and
// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange
// which has an offset but no zero value count indicates from the offset to the resource's end.
type HTTPRange = exported.HTTPRange
// Request Model Declaration -------------------------------------------------------------------------------------------

// DownloadStreamOptions contains the optional parameters for the Client.Download method.
type DownloadStreamOptions struct {
	// When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the
	// range is less than or equal to 4 MB in size.
	RangeGetContentMD5 *bool

	// Range specifies a range of bytes. The default value is all bytes.
	Range HTTPRange

	// AccessConditions specifies optional lease and modified-since conditions applied to the request.
	AccessConditions *AccessConditions
	// CpkInfo contains a group of parameters for client provided encryption key.
	CpkInfo *CpkInfo
	// CpkScopeInfo contains a group of parameters for client provided encryption scope.
	CpkScopeInfo *CpkScopeInfo
}
// format converts DownloadStreamOptions into the generated-layer parameter set.
// A nil receiver yields all-nil results so the options can be passed through unchecked.
func (o *DownloadStreamOptions) format() (*generated.BlobClientDownloadOptions, *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.ModifiedAccessConditions) {
	if o == nil {
		return nil, nil, nil, nil
	}
	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	downloadOpts := generated.BlobClientDownloadOptions{
		RangeGetContentMD5: o.RangeGetContentMD5,
		Range:              exported.FormatHTTPRange(o.Range),
	}
	return &downloadOpts, lease, o.CpkInfo, modified
}
// ---------------------------------------------------------------------------------------------------------------------

// downloadOptions contains common options used by the DownloadBuffer and DownloadFile functions.
type downloadOptions struct {
	// Range specifies a range of bytes. The default value is all bytes.
	Range HTTPRange

	// BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize.
	BlockSize int64

	// Progress is a function that is invoked periodically as bytes are received.
	Progress func(bytesTransferred int64)

	// BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob.
	AccessConditions *AccessConditions

	// ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data.
	CpkInfo *CpkInfo
	// CpkScopeInfo contains a group of parameters for client provided encryption scope.
	CpkScopeInfo *CpkScopeInfo

	// Concurrency indicates the maximum number of blocks to download in parallel (0=default)
	Concurrency uint16

	// RetryReaderOptionsPerBlock is used when downloading each block.
	RetryReaderOptionsPerBlock RetryReaderOptions
}
// getBlobPropertiesOptions maps the shared download options onto a GetPropertiesOptions value.
// A nil receiver yields nil so the result can be passed straight through to GetProperties.
func (o *downloadOptions) getBlobPropertiesOptions() *GetPropertiesOptions {
	if o == nil {
		return nil
	}
	props := GetPropertiesOptions{
		AccessConditions: o.AccessConditions,
		CpkInfo:          o.CpkInfo,
	}
	return &props
}
// getDownloadBlobOptions builds the DownloadStreamOptions for one ranged GET, combining
// the shared download settings with the given byte range and MD5-validation flag.
// A nil receiver yields nil.
func (o *downloadOptions) getDownloadBlobOptions(rnge HTTPRange, rangeGetContentMD5 *bool) *DownloadStreamOptions {
	if o == nil {
		return nil
	}
	streamOpts := DownloadStreamOptions{
		AccessConditions:   o.AccessConditions,
		CpkInfo:            o.CpkInfo,
		CpkScopeInfo:       o.CpkScopeInfo,
		Range:              rnge,
		RangeGetContentMD5: rangeGetContentMD5,
	}
	return &streamOpts
}
// DownloadBufferOptions contains the optional parameters for the DownloadBuffer method.
type DownloadBufferOptions struct {
	// Range specifies a range of bytes. The default value is all bytes.
	Range HTTPRange

	// BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize.
	BlockSize int64

	// Progress is a function that is invoked periodically as bytes are received.
	Progress func(bytesTransferred int64)

	// BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob.
	AccessConditions *AccessConditions

	// CpkInfo contains a group of parameters for client provided encryption key.
	CpkInfo *CpkInfo

	// CpkScopeInfo contains a group of parameters for client provided encryption scope.
	CpkScopeInfo *CpkScopeInfo

	// Concurrency indicates the maximum number of blocks to download in parallel (0=default)
	Concurrency uint16

	// RetryReaderOptionsPerBlock is used when downloading each block.
	RetryReaderOptionsPerBlock RetryReaderOptions
}
// DownloadFileOptions contains the optional parameters for the DownloadFile method.
type DownloadFileOptions struct {
	// Range specifies a range of bytes. The default value is all bytes.
	Range HTTPRange

	// BlockSize specifies the block size to use for each parallel download; the default size is DefaultDownloadBlockSize.
	BlockSize int64

	// Progress is a function that is invoked periodically as bytes are received.
	Progress func(bytesTransferred int64)

	// BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob.
	AccessConditions *AccessConditions

	// ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data.
	CpkInfo *CpkInfo
	// CpkScopeInfo contains a group of parameters for client provided encryption scope.
	CpkScopeInfo *CpkScopeInfo

	// Concurrency indicates the maximum number of blocks to download in parallel. The default value is 5.
	Concurrency uint16

	// RetryReaderOptionsPerBlock is used when downloading each block.
	RetryReaderOptionsPerBlock RetryReaderOptions
}
// ---------------------------------------------------------------------------------------------------------------------

// DeleteOptions contains the optional parameters for the Client.Delete method.
type DeleteOptions struct {
	// Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob
	// and all of its snapshots. only: Delete only the blob's snapshots and not the blob itself
	DeleteSnapshots *DeleteSnapshotsOptionType
	// AccessConditions specifies optional lease and modified-since conditions applied to the request.
	AccessConditions *AccessConditions
}
// format converts DeleteOptions into the generated-layer parameter set.
// A nil receiver maps to all-nil results; absent access conditions map to nil condition pointers.
func (o *DeleteOptions) format() (*generated.BlobClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
	if o == nil {
		return nil, nil, nil
	}
	deleteOpts := &generated.BlobClientDeleteOptions{
		DeleteSnapshots: o.DeleteSnapshots,
	}
	if ac := o.AccessConditions; ac != nil {
		return deleteOpts, ac.LeaseAccessConditions, ac.ModifiedAccessConditions
	}
	return deleteOpts, nil, nil
}
// ---------------------------------------------------------------------------------------------------------------------

// UndeleteOptions contains the optional parameters for the Client.Undelete method.
type UndeleteOptions struct {
	// placeholder for future options
}

// format always returns nil: Undelete currently takes no optional parameters,
// so there is nothing to translate to the generated layer.
func (o *UndeleteOptions) format() *generated.BlobClientUndeleteOptions {
	return nil
}
// ---------------------------------------------------------------------------------------------------------------------

// SetTierOptions contains the optional parameters for the Client.SetTier method.
type SetTierOptions struct {
	// Optional: Indicates the priority with which to rehydrate an archived blob.
	RehydratePriority *RehydratePriority
	// AccessConditions specifies optional lease and modified-since conditions applied to the request.
	AccessConditions *AccessConditions
}
// format converts SetTierOptions into the generated-layer parameter set.
// A nil receiver maps to all-nil results.
func (o *SetTierOptions) format() (*generated.BlobClientSetTierOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
	if o == nil {
		return nil, nil, nil
	}
	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	tierOpts := generated.BlobClientSetTierOptions{
		RehydratePriority: o.RehydratePriority,
	}
	return &tierOpts, lease, modified
}
// ---------------------------------------------------------------------------------------------------------------------

// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method
type GetPropertiesOptions struct {
	// AccessConditions specifies optional lease and modified-since conditions applied to the request.
	AccessConditions *AccessConditions
	// CpkInfo contains a group of parameters for client provided encryption key.
	CpkInfo *CpkInfo
}
// format converts GetPropertiesOptions into the generated-layer parameter set.
// The generated options value itself is always nil; only access conditions and CPK info are forwarded.
func (o *GetPropertiesOptions) format() (*generated.BlobClientGetPropertiesOptions, *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.ModifiedAccessConditions) {
	if o == nil {
		return nil, nil, nil, nil
	}
	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	return nil, lease, o.CpkInfo, modified
}
// ---------------------------------------------------------------------------------------------------------------------

// SetHTTPHeadersOptions contains the optional parameters for the Client.SetHTTPHeaders method.
type SetHTTPHeadersOptions struct {
	// AccessConditions specifies optional lease and modified-since conditions applied to the request.
	AccessConditions *AccessConditions
}
// format converts SetHTTPHeadersOptions into the generated-layer parameter set.
// The generated options value itself is always nil; only access conditions are forwarded.
func (o *SetHTTPHeadersOptions) format() (*generated.BlobClientSetHTTPHeadersOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
	if o == nil {
		return nil, nil, nil
	}
	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	return nil, lease, modified
}
// ---------------------------------------------------------------------------------------------------------------------

// SetMetadataOptions provides set of configurations for Set Metadata on blob operation
type SetMetadataOptions struct {
	// AccessConditions specifies optional lease and modified-since conditions applied to the request.
	AccessConditions *AccessConditions
	// CpkInfo contains a group of parameters for client provided encryption key.
	CpkInfo *CpkInfo
	// CpkScopeInfo contains a group of parameters for client provided encryption scope.
	CpkScopeInfo *CpkScopeInfo
}
// format converts SetMetadataOptions into the generated-layer parameter set.
// A nil receiver maps to all-nil results.
func (o *SetMetadataOptions) format() (*generated.LeaseAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) {
	if o == nil {
		return nil, nil, nil, nil
	}
	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	return lease, o.CpkInfo, o.CpkScopeInfo, modified
}
// ---------------------------------------------------------------------------------------------------------------------

// CreateSnapshotOptions contains the optional parameters for the Client.CreateSnapshot method.
type CreateSnapshotOptions struct {
	// Metadata is a set of name-value pairs to associate with the snapshot.
	Metadata map[string]string
	// AccessConditions specifies optional lease and modified-since conditions applied to the request.
	AccessConditions *AccessConditions
	// CpkInfo contains a group of parameters for client provided encryption key.
	CpkInfo *CpkInfo
	// CpkScopeInfo contains a group of parameters for client provided encryption scope.
	CpkScopeInfo *CpkScopeInfo
}
// format converts CreateSnapshotOptions into the generated-layer parameter set.
// A nil receiver maps to all-nil results. Note the return order: modified
// conditions precede lease conditions, unlike the other format helpers.
func (o *CreateSnapshotOptions) format() (*generated.BlobClientCreateSnapshotOptions, *generated.CpkInfo,
	*generated.CpkScopeInfo, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) {
	if o == nil {
		return nil, nil, nil, nil, nil
	}
	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	snapshotOpts := generated.BlobClientCreateSnapshotOptions{
		Metadata: o.Metadata,
	}
	return &snapshotOpts, o.CpkInfo, o.CpkScopeInfo, modified, lease
}
// ---------------------------------------------------------------------------------------------------------------------

// StartCopyFromURLOptions contains the optional parameters for the Client.StartCopyFromURL method.
type StartCopyFromURLOptions struct {
	// Specifies the date time when the blobs immutability policy is set to expire.
	ImmutabilityPolicyExpiry *time.Time
	// Specifies the immutability policy mode to set on the blob.
	ImmutabilityPolicyMode *ImmutabilityPolicySetting
	// Specified if a legal hold should be set on the blob.
	LegalHold *bool
	// Optional. Used to set blob tags in various blob operations.
	BlobTags map[string]string
	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
	// operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs
	// are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source
	// blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers.
	// See Naming and Referencing Containers, Blobs, and Metadata for more information.
	Metadata map[string]string
	// Optional: Indicates the priority with which to rehydrate an archived blob.
	RehydratePriority *RehydratePriority
	// Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer.
	SealBlob *bool
	// Optional. Indicates the tier to be set on the blob.
	Tier *AccessTier
	// SourceModifiedAccessConditions specifies optional conditions evaluated against the copy source.
	SourceModifiedAccessConditions *SourceModifiedAccessConditions
	// AccessConditions specifies optional lease and modified-since conditions applied to the destination blob.
	AccessConditions *AccessConditions
}
// format converts StartCopyFromURLOptions into the generated-layer parameter set.
// A nil receiver maps to all-nil results.
func (o *StartCopyFromURLOptions) format() (*generated.BlobClientStartCopyFromURLOptions,
	*generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) {
	if o == nil {
		return nil, nil, nil, nil
	}
	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	copyOpts := generated.BlobClientStartCopyFromURLOptions{
		BlobTagsString:           shared.SerializeBlobTagsToStrPtr(o.BlobTags),
		ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry,
		ImmutabilityPolicyMode:   o.ImmutabilityPolicyMode,
		LegalHold:                o.LegalHold,
		Metadata:                 o.Metadata,
		RehydratePriority:        o.RehydratePriority,
		SealBlob:                 o.SealBlob,
		Tier:                     o.Tier,
	}
	return &copyOpts, o.SourceModifiedAccessConditions, modified, lease
}
// ---------------------------------------------------------------------------------------------------------------------

// AbortCopyFromURLOptions contains the optional parameters for the Client.AbortCopyFromURL method.
type AbortCopyFromURLOptions struct {
	// LeaseAccessConditions contains optional parameters to access the leased blob.
	LeaseAccessConditions *LeaseAccessConditions
}
// format converts AbortCopyFromURLOptions into the generated-layer parameter set.
// The generated options value is always nil; only lease access conditions are forwarded.
func (o *AbortCopyFromURLOptions) format() (*generated.BlobClientAbortCopyFromURLOptions, *generated.LeaseAccessConditions) {
	if o != nil {
		return nil, o.LeaseAccessConditions
	}
	return nil, nil
}
// ---------------------------------------------------------------------------------------------------------------------

// SetTagsOptions contains the optional parameters for the Client.SetTags method.
type SetTagsOptions struct {
	// The version id parameter is an opaque DateTime value that, when present,
	// specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer.
	VersionID *string
	// Optional header, Specifies the transactional crc64 for the body, to be validated by the service.
	TransactionalContentCRC64 []byte
	// Optional header, Specifies the transactional md5 for the body, to be validated by the service.
	TransactionalContentMD5 []byte
	// AccessConditions specifies optional lease and modified-since conditions applied to the request.
	AccessConditions *AccessConditions
}
// format converts SetTagsOptions into the generated-layer parameter set.
// A nil receiver maps to all-nil results. Note the return order: modified
// conditions precede lease conditions.
func (o *SetTagsOptions) format() (*generated.BlobClientSetTagsOptions, *ModifiedAccessConditions, *generated.LeaseAccessConditions) {
	if o == nil {
		return nil, nil, nil
	}
	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	tagOpts := generated.BlobClientSetTagsOptions{
		TransactionalContentCRC64: o.TransactionalContentCRC64,
		TransactionalContentMD5:   o.TransactionalContentMD5,
		VersionID:                 o.VersionID,
	}
	return &tagOpts, modified, lease
}
// ---------------------------------------------------------------------------------------------------------------------

// GetTagsOptions contains the optional parameters for the Client.GetTags method.
type GetTagsOptions struct {
	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve.
	Snapshot *string
	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
	// It's for service version 2019-10-10 and newer.
	VersionID *string
	// BlobAccessConditions specifies optional lease and modified-since conditions applied to the request.
	BlobAccessConditions *AccessConditions
}
// format converts GetTagsOptions into the generated-layer parameter set.
// A nil receiver maps to all-nil results. Note the return order: modified
// conditions precede lease conditions.
func (o *GetTagsOptions) format() (*generated.BlobClientGetTagsOptions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) {
	if o == nil {
		return nil, nil, nil
	}
	tagOpts := generated.BlobClientGetTagsOptions{
		Snapshot:  o.Snapshot,
		VersionID: o.VersionID,
	}
	lease, modified := exported.FormatBlobAccessConditions(o.BlobAccessConditions)
	return &tagOpts, modified, lease
}
// ---------------------------------------------------------------------------------------------------------------------

// CopyFromURLOptions contains the optional parameters for the Client.CopyFromURL method.
type CopyFromURLOptions struct {
	// Optional. Used to set blob tags in various blob operations.
	BlobTags map[string]string
	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
	CopySourceAuthorization *string
	// Specifies the date time when the blobs immutability policy is set to expire.
	ImmutabilityPolicyExpiry *time.Time
	// Specifies the immutability policy mode to set on the blob.
	ImmutabilityPolicyMode *ImmutabilityPolicySetting
	// Specified if a legal hold should be set on the blob.
	LegalHold *bool
	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
	// operation will copy the metadata from the source blob or file to the destination
	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
	// is not copied from the source blob or file. Note that beginning with
	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
	// Blobs, and Metadata for more information.
	Metadata map[string]string
	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
	SourceContentMD5 []byte
	// Optional. Indicates the tier to be set on the blob.
	Tier *AccessTier
	// SourceModifiedAccessConditions specifies optional conditions evaluated against the copy source.
	SourceModifiedAccessConditions *SourceModifiedAccessConditions
	// BlobAccessConditions specifies optional lease and modified-since conditions applied to the destination blob.
	BlobAccessConditions *AccessConditions
}
// format converts CopyFromURLOptions into the generated-layer parameter set.
// A nil receiver maps to all-nil results.
func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions, *generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) {
	if o == nil {
		return nil, nil, nil, nil
	}
	copyOpts := generated.BlobClientCopyFromURLOptions{
		BlobTagsString:           shared.SerializeBlobTagsToStrPtr(o.BlobTags),
		CopySourceAuthorization:  o.CopySourceAuthorization,
		ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry,
		ImmutabilityPolicyMode:   o.ImmutabilityPolicyMode,
		LegalHold:                o.LegalHold,
		Metadata:                 o.Metadata,
		SourceContentMD5:         o.SourceContentMD5,
		Tier:                     o.Tier,
	}
	lease, modified := exported.FormatBlobAccessConditions(o.BlobAccessConditions)
	return &copyOpts, o.SourceModifiedAccessConditions, modified, lease
}

View file

@ -0,0 +1,104 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blob
import (
"context"
"io"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
// DownloadStreamResponse contains the response from the DownloadStream method.
// To read from the stream, read from the Body field, or call the NewRetryReader method.
type DownloadStreamResponse struct {
	generated.BlobClientDownloadResponse

	// ObjectReplicationRules holds the blob's object replication policies
	// (presumably populated by the DownloadStream caller — confirm there).
	ObjectReplicationRules []ObjectReplicationPolicy

	// client is the Client that produced this response; NewRetryReader uses it to re-issue GETs.
	client *Client
	// getInfo captures the range and ETag state needed to resume the download after a read failure.
	getInfo httpGetterInfo
	// cpkInfo and cpkScope carry the customer-provided-key settings so retried GETs match the original request.
	cpkInfo  *CpkInfo
	cpkScope *CpkScopeInfo
}
// NewRetryReader constructs new RetryReader stream for reading data. If a connection fails while
// reading, it will make additional requests to reestablish a connection and continue reading.
// Pass nil for options to accept the default options.
// Callers of this method should not access the DownloadStreamResponse.Body field directly;
// the returned RetryReader wraps it.
func (r *DownloadStreamResponse) NewRetryReader(ctx context.Context, options *RetryReaderOptions) *RetryReader {
	if options == nil {
		options = &RetryReaderOptions{}
	}
	// The getter closure re-issues a ranged GET using the current range state and pins
	// the blob content with an If-Match condition on the captured ETag, so a retry
	// fails rather than silently reading a blob that changed mid-download.
	return newRetryReader(ctx, r.Body, r.getInfo, func(ctx context.Context, getInfo httpGetterInfo) (io.ReadCloser, error) {
		accessConditions := &AccessConditions{
			ModifiedAccessConditions: &ModifiedAccessConditions{IfMatch: getInfo.ETag},
		}
		options := DownloadStreamOptions{
			Range:            getInfo.Range,
			AccessConditions: accessConditions,
			CpkInfo:          r.cpkInfo,
			CpkScopeInfo:     r.cpkScope,
		}
		resp, err := r.client.DownloadStream(ctx, &options)
		if err != nil {
			return nil, err
		}
		return resp.Body, err
	}, *options)
}
// Response type aliases ------------------------------------------------------------------------------------------------
// Each alias re-exports the corresponding generated-layer response type under a shorter name.

// DeleteResponse contains the response from method BlobClient.Delete.
type DeleteResponse = generated.BlobClientDeleteResponse

// UndeleteResponse contains the response from method BlobClient.Undelete.
type UndeleteResponse = generated.BlobClientUndeleteResponse

// SetTierResponse contains the response from method BlobClient.SetTier.
type SetTierResponse = generated.BlobClientSetTierResponse

// GetPropertiesResponse contains the response from method BlobClient.GetProperties.
type GetPropertiesResponse = generated.BlobClientGetPropertiesResponse

// SetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders.
type SetHTTPHeadersResponse = generated.BlobClientSetHTTPHeadersResponse

// SetMetadataResponse contains the response from method BlobClient.SetMetadata.
type SetMetadataResponse = generated.BlobClientSetMetadataResponse

// CreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot.
type CreateSnapshotResponse = generated.BlobClientCreateSnapshotResponse

// StartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL.
type StartCopyFromURLResponse = generated.BlobClientStartCopyFromURLResponse

// AbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL.
type AbortCopyFromURLResponse = generated.BlobClientAbortCopyFromURLResponse

// SetTagsResponse contains the response from method BlobClient.SetTags.
type SetTagsResponse = generated.BlobClientSetTagsResponse

// GetTagsResponse contains the response from method BlobClient.GetTags.
type GetTagsResponse = generated.BlobClientGetTagsResponse

// CopyFromURLResponse contains the response from method BlobClient.CopyFromURL.
type CopyFromURLResponse = generated.BlobClientCopyFromURLResponse

// AcquireLeaseResponse contains the response from method BlobClient.AcquireLease.
type AcquireLeaseResponse = generated.BlobClientAcquireLeaseResponse

// BreakLeaseResponse contains the response from method BlobClient.BreakLease.
type BreakLeaseResponse = generated.BlobClientBreakLeaseResponse

// ChangeLeaseResponse contains the response from method BlobClient.ChangeLease.
type ChangeLeaseResponse = generated.BlobClientChangeLeaseResponse

// ReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease.
type ReleaseLeaseResponse = generated.BlobClientReleaseLeaseResponse

// RenewLeaseResponse contains the response from method BlobClient.RenewLease.
type RenewLeaseResponse = generated.BlobClientRenewLeaseResponse

View file

@ -2,57 +2,46 @@
// +build go1.18 // +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved. // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. // Licensed under the MIT License. See License.txt in the project root for license information.
package azblob package blob
import ( import (
"context" "context"
"io" "io"
"net" "net"
"net/http"
"strings" "strings"
"sync" "sync"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
) )
const CountToEnd = 0
// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation. // HTTPGetter is a function type that refers to a method that performs an HTTP GET operation.
type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error) type httpGetter func(ctx context.Context, i httpGetterInfo) (io.ReadCloser, error)
// HTTPGetterInfo is passed to an HTTPGetter function passing it parameters // HTTPGetterInfo is passed to an HTTPGetter function passing it parameters
// that should be used to make an HTTP GET request. // that should be used to make an HTTP GET request.
type HTTPGetterInfo struct { type httpGetterInfo struct {
// Offset specifies the start offset that should be used when Range HTTPRange
// creating the HTTP GET request's Range header
Offset int64
// Count specifies the count of bytes that should be used to calculate
// the end offset when creating the HTTP GET request's Range header
Count int64
// ETag specifies the resource's etag that should be used when creating // ETag specifies the resource's etag that should be used when creating
// the HTTP GET request's If-Match header // the HTTP GET request's If-Match header
ETag string ETag *azcore.ETag
} }
// FailedReadNotifier is a function type that represents the notification function called when a read fails // RetryReaderOptions configures the retry reader's behavior.
type FailedReadNotifier func(failureCount int, lastError error, offset int64, count int64, willRetry bool) // Zero-value fields will have their specified default values applied during use.
// This allows for modification of a subset of fields.
// RetryReaderOptions contains properties which can help to decide when to do retry.
type RetryReaderOptions struct { type RetryReaderOptions struct {
// MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made // MaxRetries specifies the maximum number of attempts a failed read will be retried
// while reading from a RetryReader. A value of zero means that no additional HTTP // before producing an error.
// GET requests will be made. // The default value is three.
MaxRetryRequests int MaxRetries int32
doInjectError bool
doInjectErrorRound int
injectedError error
// NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging. // OnFailedRead, when non-nil, is called after any failure to read. Expected usage is diagnostic logging.
NotifyFailedRead FailedReadNotifier OnFailedRead func(failureCount int32, lastError error, rnge HTTPRange, willRetry bool)
// TreatEarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default, // EarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default,
// retryReader has the following special behaviour: closing the response body before it is all read is treated as a // retryReader has the following special behaviour: closing the response body before it is all read is treated as a
// retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the = // retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the =
// read is too slow, caller may want to force a retry in the hope that the retry will be quicker). If // read is too slow, caller may want to force a retry in the hope that the retry will be quicker). If
@ -61,52 +50,59 @@ type RetryReaderOptions struct {
// Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens // Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens
// from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors // from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors
// which will be retried. // which will be retried.
TreatEarlyCloseAsError bool // The default value is false.
EarlyCloseAsError bool
CpkInfo *CpkInfo doInjectError bool
CpkScopeInfo *CpkScopeInfo doInjectErrorRound int32
injectedError error
} }
// retryReader implements io.ReaderCloser methods. // RetryReader attempts to read from response, and if there is retriable network error
// retryReader tries to read from response, and if there is retriable network error
// returned during reading, it will retry according to retry reader option through executing // returned during reading, it will retry according to retry reader option through executing
// user defined action with provided data to get a new response, and continue the overall reading process // user defined action with provided data to get a new response, and continue the overall reading process
// through reading from the new response. // through reading from the new response.
type retryReader struct { // RetryReader implements the io.ReadCloser interface.
ctx context.Context type RetryReader struct {
info HTTPGetterInfo ctx context.Context
countWasBounded bool info httpGetterInfo
o RetryReaderOptions retryReaderOptions RetryReaderOptions
getter HTTPGetter getter httpGetter
countWasBounded bool
// we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response // we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response
responseMu *sync.Mutex responseMu *sync.Mutex
response *http.Response response io.ReadCloser
} }
// NewRetryReader creates a retry reader. // newRetryReader creates a retry reader.
func NewRetryReader(ctx context.Context, initialResponse *http.Response, func newRetryReader(ctx context.Context, initialResponse io.ReadCloser, info httpGetterInfo, getter httpGetter, o RetryReaderOptions) *RetryReader {
info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser { if o.MaxRetries < 1 {
return &retryReader{ o.MaxRetries = 3
ctx: ctx, }
getter: getter, return &RetryReader{
info: info, ctx: ctx,
countWasBounded: info.Count != CountToEnd, getter: getter,
response: initialResponse, info: info,
responseMu: &sync.Mutex{}, countWasBounded: info.Range.Count != CountToEnd,
o: o} response: initialResponse,
responseMu: &sync.Mutex{},
retryReaderOptions: o,
}
} }
func (s *retryReader) setResponse(r *http.Response) { // setResponse function
func (s *RetryReader) setResponse(r io.ReadCloser) {
s.responseMu.Lock() s.responseMu.Lock()
defer s.responseMu.Unlock() defer s.responseMu.Unlock()
s.response = r s.response = r
} }
func (s *retryReader) Read(p []byte) (n int, err error) { // Read from retry reader
for try := 0; ; try++ { func (s *RetryReader) Read(p []byte) (n int, err error) {
for try := int32(0); ; try++ {
//fmt.Println(try) // Comment out for debugging. //fmt.Println(try) // Comment out for debugging.
if s.countWasBounded && s.info.Count == CountToEnd { if s.countWasBounded && s.info.Range.Count == CountToEnd {
// User specified an original count and the remaining bytes are 0, return 0, EOF // User specified an original count and the remaining bytes are 0, return 0, EOF
return 0, io.EOF return 0, io.EOF
} }
@ -123,12 +119,12 @@ func (s *retryReader) Read(p []byte) (n int, err error) {
s.setResponse(newResponse) s.setResponse(newResponse)
resp = newResponse resp = newResponse
} }
n, err := resp.Body.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running) n, err := resp.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running)
// Injection mechanism for testing. // Injection mechanism for testing.
if s.o.doInjectError && try == s.o.doInjectErrorRound { if s.retryReaderOptions.doInjectError && try == s.retryReaderOptions.doInjectErrorRound {
if s.o.injectedError != nil { if s.retryReaderOptions.injectedError != nil {
err = s.o.injectedError err = s.retryReaderOptions.injectedError
} else { } else {
err = &net.DNSError{IsTemporary: true} err = &net.DNSError{IsTemporary: true}
} }
@ -136,9 +132,9 @@ func (s *retryReader) Read(p []byte) (n int, err error) {
// We successfully read data or end EOF. // We successfully read data or end EOF.
if err == nil || err == io.EOF { if err == nil || err == io.EOF {
s.info.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future s.info.Range.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future
if s.info.Count != CountToEnd { if s.info.Range.Count != CountToEnd {
s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future s.info.Range.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future
} }
return n, err // Return the return to the caller return n, err // Return the return to the caller
} }
@ -147,15 +143,15 @@ func (s *retryReader) Read(p []byte) (n int, err error) {
s.setResponse(nil) // Our stream is no longer good s.setResponse(nil) // Our stream is no longer good
// Check the retry count and error code, and decide whether to retry. // Check the retry count and error code, and decide whether to retry.
retriesExhausted := try >= s.o.MaxRetryRequests retriesExhausted := try >= s.retryReaderOptions.MaxRetries
_, isNetError := err.(net.Error) _, isNetError := err.(net.Error)
isUnexpectedEOF := err == io.ErrUnexpectedEOF isUnexpectedEOF := err == io.ErrUnexpectedEOF
willRetry := (isNetError || isUnexpectedEOF || s.wasRetryableEarlyClose(err)) && !retriesExhausted willRetry := (isNetError || isUnexpectedEOF || s.wasRetryableEarlyClose(err)) && !retriesExhausted
// Notify, for logging purposes, of any failures // Notify, for logging purposes, of any failures
if s.o.NotifyFailedRead != nil { if s.retryReaderOptions.OnFailedRead != nil {
failureCount := try + 1 // because try is zero-based failureCount := try + 1 // because try is zero-based
s.o.NotifyFailedRead(failureCount, err, s.info.Offset, s.info.Count, willRetry) s.retryReaderOptions.OnFailedRead(failureCount, err, s.info.Range, willRetry)
} }
if willRetry { if willRetry {
@ -174,21 +170,23 @@ func (s *retryReader) Read(p []byte) (n int, err error) {
// then there are two different types of error that may happen - either the one one we check for here, // then there are two different types of error that may happen - either the one one we check for here,
// or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine // or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine
// to check for one, since the other is a net.Error, which our main Read retry loop is already handing. // to check for one, since the other is a net.Error, which our main Read retry loop is already handing.
func (s *retryReader) wasRetryableEarlyClose(err error) bool { func (s *RetryReader) wasRetryableEarlyClose(err error) bool {
if s.o.TreatEarlyCloseAsError { if s.retryReaderOptions.EarlyCloseAsError {
return false // user wants all early closes to be errors, and so not retryable return false // user wants all early closes to be errors, and so not retryable
} }
// unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text // unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text
return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage) return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage)
} }
// ReadOnClosedBodyMessage of retry reader
const ReadOnClosedBodyMessage = "read on closed response body" const ReadOnClosedBodyMessage = "read on closed response body"
func (s *retryReader) Close() error { // Close retry reader
func (s *RetryReader) Close() error {
s.responseMu.Lock() s.responseMu.Lock()
defer s.responseMu.Unlock() defer s.responseMu.Unlock()
if s.response != nil && s.response.Body != nil { if s.response != nil {
return s.response.Body.Close() return s.response.Close()
} }
return nil return nil
} }

View file

@ -0,0 +1,79 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blob
import (
"strings"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)
// ObjectReplicationRules describes the replication status of a single
// object-replication rule applied to a blob.
type ObjectReplicationRules struct {
	RuleId string
	Status string
}

// ObjectReplicationPolicy groups the deserialized object-replication rules
// that belong to one policy id.
type ObjectReplicationPolicy struct {
	PolicyId *string
	Rules    *[]ObjectReplicationRules
}

// deserializeORSPolicies converts raw "x-ms-or-<policy_id>_<rule_id>" response
// headers into a slice of ObjectReplicationPolicy values.
// A nil input yields a nil result.
func deserializeORSPolicies(policies map[string]string) (objectReplicationPolicies []ObjectReplicationPolicy) {
	if policies == nil {
		return nil
	}
	// For source blobs (blobs that have policy ids and rule ids applied to them),
	// the header will be formatted as "x-ms-or-<policy_id>_<rule_id>: {Complete, Failed}".
	// The value of this header is the status of the replication.
	orPolicyStatusHeader := make(map[string]string)
	for key, value := range policies {
		if strings.Contains(key, "or-") && key != "x-ms-or-policy-id" {
			orPolicyStatusHeader[key] = value
		}
	}

	parsedResult := make(map[string][]ObjectReplicationRules)
	for key, value := range orPolicyStatusHeader {
		policyAndRuleIDs := strings.Split(strings.Split(key, "or-")[1], "_")
		policyId, ruleId := policyAndRuleIDs[0], policyAndRuleIDs[1]
		parsedResult[policyId] = append(parsedResult[policyId], ObjectReplicationRules{RuleId: ruleId, Status: value})
	}

	for policyId, rules := range parsedResult {
		// Copy the range variables before taking their addresses: prior to
		// Go 1.22 the loop variables are reused on every iteration, so storing
		// &policyId / &rules directly would make every returned policy alias
		// the values of the final iteration.
		policyId, rules := policyId, rules
		objectReplicationPolicies = append(objectReplicationPolicies, ObjectReplicationPolicy{
			PolicyId: &policyId,
			Rules:    &rules,
		})
	}
	return
}
// ParseHTTPHeaders extracts the standard blob content headers from a
// GetPropertiesResponse and repackages them as an HTTPHeaders value.
func ParseHTTPHeaders(resp GetPropertiesResponse) HTTPHeaders {
	var headers HTTPHeaders
	headers.BlobContentType = resp.ContentType
	headers.BlobContentEncoding = resp.ContentEncoding
	headers.BlobContentLanguage = resp.ContentLanguage
	headers.BlobContentDisposition = resp.ContentDisposition
	headers.BlobCacheControl = resp.CacheControl
	headers.BlobContentMD5 = resp.ContentMD5
	return headers
}
// URLParts represents the components that make up an Azure Storage Container/Blob URL.
// It is an alias of sas.URLParts, re-exported here for convenience.
// NOTE: Changing any SAS-related field requires computing a new SAS signature.
type URLParts = sas.URLParts
// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters.
// Any other query parameters remain in the UnparsedParams field. This method overwrites all fields in the URLParts object.
func ParseURL(u string) (URLParts, error) {
	// Delegate to the sas package; URLParts is an alias of sas.URLParts.
	parts, err := sas.ParseURL(u)
	return parts, err
}

View file

@ -2,9 +2,9 @@
// +build go1.18 // +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved. // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. // Licensed under the MIT License. See License.txt in the project root for license information.
package azblob package blockblob
import ( import (
"bytes" "bytes"
@ -13,19 +13,19 @@ import (
"encoding/binary" "encoding/binary"
"errors" "errors"
"fmt" "fmt"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal"
"io" "io"
"sync" "sync"
"sync/atomic" "sync/atomic"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
) )
// blockWriter provides methods to upload blocks that represent a file to a server and commit them. // blockWriter provides methods to upload blocks that represent a file to a server and commit them.
// This allows us to provide a local implementation that fakes the server for hermetic testing. // This allows us to provide a local implementation that fakes the server for hermetic testing.
type blockWriter interface { type blockWriter interface {
StageBlock(context.Context, string, io.ReadSeekCloser, *BlockBlobStageBlockOptions) (BlockBlobStageBlockResponse, error) StageBlock(context.Context, string, io.ReadSeekCloser, *StageBlockOptions) (StageBlockResponse, error)
CommitBlockList(context.Context, []string, *BlockBlobCommitBlockListOptions) (BlockBlobCommitBlockListResponse, error) CommitBlockList(context.Context, []string, *CommitBlockListOptions) (CommitBlockListResponse, error)
} }
// copyFromReader copies a source io.Reader to blob storage using concurrent uploads. // copyFromReader copies a source io.Reader to blob storage using concurrent uploads.
@ -36,9 +36,9 @@ type blockWriter interface {
// well, 4 MiB or 8 MiB, and auto-scale to as many goroutines within the memory limit. This gives a single dial to tweak and we can // well, 4 MiB or 8 MiB, and auto-scale to as many goroutines within the memory limit. This gives a single dial to tweak and we can
// choose a max value for the memory setting based on internal transfers within Azure (which will give us the maximum throughput model). // choose a max value for the memory setting based on internal transfers within Azure (which will give us the maximum throughput model).
// We can even provide a utility to dial this number in for customer networks to optimize their copies. // We can even provide a utility to dial this number in for customer networks to optimize their copies.
func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o UploadStreamOptions) (BlockBlobCommitBlockListResponse, error) { func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o UploadStreamOptions) (CommitBlockListResponse, error) {
if err := o.defaults(); err != nil { if err := o.format(); err != nil {
return BlockBlobCommitBlockListResponse{}, err return CommitBlockListResponse{}, err
} }
ctx, cancel := context.WithCancel(ctx) ctx, cancel := context.WithCancel(ctx)
@ -47,7 +47,7 @@ func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o Uploa
var err error var err error
generatedUuid, err := uuid.New() generatedUuid, err := uuid.New()
if err != nil { if err != nil {
return BlockBlobCommitBlockListResponse{}, err return CommitBlockListResponse{}, err
} }
cp := &copier{ cp := &copier{
@ -68,12 +68,12 @@ func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o Uploa
} }
// If the error is not EOF, then we have a problem. // If the error is not EOF, then we have a problem.
if err != nil && !errors.Is(err, io.EOF) { if err != nil && !errors.Is(err, io.EOF) {
return BlockBlobCommitBlockListResponse{}, err return CommitBlockListResponse{}, err
} }
// Close out our upload. // Close out our upload.
if err := cp.close(); err != nil { if err := cp.close(); err != nil {
return BlockBlobCommitBlockListResponse{}, err return CommitBlockListResponse{}, err
} }
return cp.result, nil return cp.result, nil
@ -109,9 +109,10 @@ type copier struct {
wg sync.WaitGroup wg sync.WaitGroup
// result holds the final result from blob storage after we have submitted all chunks. // result holds the final result from blob storage after we have submitted all chunks.
result BlockBlobCommitBlockListResponse result CommitBlockListResponse
} }
// copierChunk contains buffer
type copierChunk struct { type copierChunk struct {
buffer []byte buffer []byte
id string id string
@ -136,17 +137,17 @@ func (c *copier) sendChunk() error {
return err return err
} }
buffer := c.o.TransferManager.Get() buffer := c.o.transferManager.Get()
if len(buffer) == 0 { if len(buffer) == 0 {
return fmt.Errorf("TransferManager returned a 0 size buffer, this is a bug in the manager") return fmt.Errorf("transferManager returned a 0 size buffer, this is a bug in the manager")
} }
n, err := io.ReadFull(c.reader, buffer) n, err := io.ReadFull(c.reader, buffer)
if n > 0 { if n > 0 {
// Some data was read, schedule the write. // Some data was read, schedule the Write.
id := c.id.next() id := c.id.next()
c.wg.Add(1) c.wg.Add(1)
c.o.TransferManager.Run( c.o.transferManager.Run(
func() { func() {
defer c.wg.Done() defer c.wg.Done()
c.write(copierChunk{buffer: buffer, id: id, length: n}) c.write(copierChunk{buffer: buffer, id: id, length: n})
@ -154,7 +155,7 @@ func (c *copier) sendChunk() error {
) )
} else { } else {
// Return the unused buffer to the manager. // Return the unused buffer to the manager.
c.o.TransferManager.Put(buffer) c.o.transferManager.Put(buffer)
} }
if err == nil { if err == nil {
@ -172,16 +173,20 @@ func (c *copier) sendChunk() error {
// write uploads a chunk to blob storage. // write uploads a chunk to blob storage.
func (c *copier) write(chunk copierChunk) { func (c *copier) write(chunk copierChunk) {
defer c.o.TransferManager.Put(chunk.buffer) defer c.o.transferManager.Put(chunk.buffer)
if err := c.ctx.Err(); err != nil { if err := c.ctx.Err(); err != nil {
return return
} }
stageBlockOptions := c.o.getStageBlockOptions() stageBlockOptions := c.o.getStageBlockOptions()
_, err := c.to.StageBlock(c.ctx, chunk.id, internal.NopCloser(bytes.NewReader(chunk.buffer[:chunk.length])), stageBlockOptions) _, err := c.to.StageBlock(c.ctx, chunk.id, shared.NopCloser(bytes.NewReader(chunk.buffer[:chunk.length])), stageBlockOptions)
if err != nil { if err != nil {
c.errCh <- fmt.Errorf("write error: %w", err) select {
return case c.errCh <- err:
// failed to stage block, cancel the copy
default:
// don't block the goroutine if there's a pending error
}
} }
} }

View file

@ -0,0 +1,468 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blockblob
import (
"bytes"
"context"
"encoding/base64"
"errors"
"io"
"os"
"sync"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
)
// ClientOptions contains the optional parameters when creating a Client.
// It embeds azcore.ClientOptions, so transport, retry, and telemetry settings
// configured there are honored by the block blob client.
type ClientOptions struct {
	azcore.ClientOptions
}
// Client defines a set of operations applicable to block blobs.
// It is a composite of the generated blob client and block-blob client.
type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient]
// NewClient creates a Client object using the specified URL, Azure AD credential, and options.
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
	conOptions := shared.GetClientOptions(options)
	// Authenticate every (re)try of a request with a bearer token for the storage scope.
	conOptions.PerRetryPolicies = append(
		conOptions.PerRetryPolicies,
		runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil),
	)
	pipeline := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)

	return (*Client)(base.NewBlockBlobClient(blobURL, pipeline, nil)), nil
}
// NewClientWithNoCredential creates a Client object using the specified URL and options.
// The resulting client issues anonymous (unauthenticated) requests.
func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
	conOptions := shared.GetClientOptions(options)
	pipeline := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)
	client := base.NewBlockBlobClient(blobURL, pipeline, nil)
	return (*Client)(client), nil
}
// NewClientWithSharedKeyCredential creates a Client object using the specified URL, shared key, and options.
func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) {
	conOptions := shared.GetClientOptions(options)
	// Sign every (re)try of a request with the account's shared key.
	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, exported.NewSharedKeyCredPolicy(cred))
	pipeline := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)

	return (*Client)(base.NewBlockBlobClient(blobURL, pipeline, cred)), nil
}
// NewClientFromConnectionString creates a Client from a connection string.
// When the connection string carries both an account name and key, the client
// authenticates with a shared-key credential; otherwise it is anonymous.
func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) {
	parsed, err := shared.ParseConnectionString(connectionString)
	if err != nil {
		return nil, err
	}
	parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName)

	if parsed.AccountKey == "" || parsed.AccountName == "" {
		return NewClientWithNoCredential(parsed.ServiceURL, options)
	}
	credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey)
	if err != nil {
		return nil, err
	}
	return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options)
}
// sharedKey returns the shared-key credential the client was constructed with,
// or nil when another (or no) credential is in use.
func (bb *Client) sharedKey() *blob.SharedKeyCredential {
	return base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb))
}
// generated returns the inner generated block-blob client of the composite.
func (bb *Client) generated() *generated.BlockBlobClient {
	_, blockBlob := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb))
	return blockBlob
}
// URL returns the URL endpoint used by the Client object.
func (bb *Client) URL() string {
	gen := bb.generated()
	return gen.Endpoint()
}
// BlobClient returns the embedded blob client for this block blob client
// (the generic blob half of the composite client).
func (bb *Client) BlobClient() *blob.Client {
	blobClient, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb))
	return (*blob.Client)(blobClient)
}
// WithSnapshot creates a new Client object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (bb *Client) WithSnapshot(snapshot string) (*Client, error) {
	parts, err := blob.ParseURL(bb.URL())
	if err != nil {
		return nil, err
	}
	parts.Snapshot = snapshot

	return (*Client)(base.NewBlockBlobClient(parts.String(), bb.generated().Pipeline(), bb.sharedKey())), nil
}
// WithVersionID creates a new Client object identical to the source but with the specified version id.
// Pass "" to remove the versionID returning a URL to the base blob.
func (bb *Client) WithVersionID(versionID string) (*Client, error) {
	p, err := blob.ParseURL(bb.URL())
	if err != nil {
		return nil, err
	}
	p.VersionID = versionID
	return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Pipeline(), bb.sharedKey())), nil
}
// Upload creates a new block blob or overwrites an existing block blob.
// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not
// supported with Upload; the content of the existing blob is overwritten with the new content. To
// perform a partial update of a block blob, use StageBlock and CommitBlockList.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (bb *Client) Upload(ctx context.Context, body io.ReadSeekCloser, options *UploadOptions) (UploadResponse, error) {
	// The service needs an exact content length, and the stream must start at position 0.
	count, err := shared.ValidateSeekableStreamAt0AndGetCount(body)
	if err != nil {
		return UploadResponse{}, err
	}

	opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions := options.format()
	return bb.generated().Upload(ctx, count, body, opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions)
}
// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
func (bb *Client) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeekCloser, options *StageBlockOptions) (StageBlockResponse, error) {
	// The service needs an exact content length, and the stream must start at position 0.
	count, err := shared.ValidateSeekableStreamAt0AndGetCount(body)
	if err != nil {
		return StageBlockResponse{}, err
	}

	opts, leaseAccessConditions, cpkInfo, cpkScopeInfo := options.format()
	return bb.generated().StageBlock(ctx, base64BlockID, count, body, opts, leaseAccessConditions, cpkInfo, cpkScopeInfo)
}
// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// If count is CountToEnd (0), then data is read from specified offset to the end.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
func (bb *Client) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL string,
	contentLength int64, options *StageBlockFromURLOptions) (StageBlockFromURLResponse, error) {
	opts, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions := options.format()
	return bb.generated().StageBlockFromURL(ctx, base64BlockID, contentLength, sourceURL, opts,
		cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions)
}
// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
// In order to be written as part of a blob, a block must have been successfully written
// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob
// by uploading only those blocks that have changed, then committing the new and existing
// blocks together. Any blocks not specified in the block list are permanently deleted.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
func (bb *Client) CommitBlockList(ctx context.Context, base64BlockIDs []string, options *CommitBlockListOptions) (CommitBlockListResponse, error) {
	// The generated layer wants []*string; convert ([]string -> []*string).
	// this is a code smell in the generated code
	blockIds := make([]*string, len(base64BlockIDs))
	for k, v := range base64BlockIDs {
		blockIds[k] = to.Ptr(v)
	}

	blockLookupList := generated.BlockLookupList{Latest: blockIds}

	// All of these stay nil when options is nil, which the generated client accepts.
	var commitOptions *generated.BlockBlobClientCommitBlockListOptions
	var headers *generated.BlobHTTPHeaders
	var leaseAccess *blob.LeaseAccessConditions
	var cpkInfo *generated.CpkInfo
	var cpkScope *generated.CpkScopeInfo
	var modifiedAccess *generated.ModifiedAccessConditions

	if options != nil {
		// Translate the public options struct into the generated request options.
		commitOptions = &generated.BlockBlobClientCommitBlockListOptions{
			BlobTagsString:            shared.SerializeBlobTagsToStrPtr(options.Tags),
			Metadata:                  options.Metadata,
			RequestID:                 options.RequestID,
			Tier:                      options.Tier,
			Timeout:                   options.Timeout,
			TransactionalContentCRC64: options.TransactionalContentCRC64,
			TransactionalContentMD5:   options.TransactionalContentMD5,
		}

		headers = options.HTTPHeaders
		leaseAccess, modifiedAccess = exported.FormatBlobAccessConditions(options.AccessConditions)
		cpkInfo = options.CpkInfo
		cpkScope = options.CpkScopeInfo
	}

	resp, err := bb.generated().CommitBlockList(ctx, blockLookupList, commitOptions, headers, leaseAccess, cpkInfo, cpkScope, modifiedAccess)
	return resp, err
}
// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list.
func (bb *Client) GetBlockList(ctx context.Context, listType BlockListType, options *GetBlockListOptions) (GetBlockListResponse, error) {
	opts, leaseAccessConditions, modifiedAccessConditions := options.format()
	return bb.generated().GetBlockList(ctx, listType, opts, leaseAccessConditions, modifiedAccessConditions)
}
// Redeclared APIs ----- Copy over to Append blob and Page blob as well.
// Delete marks the specified blob or snapshot for deletion; the actual removal
// happens later during garbage collection. Deleting a blob also deletes all its snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
func (bb *Client) Delete(ctx context.Context, o *blob.DeleteOptions) (blob.DeleteResponse, error) {
	blobClient := bb.BlobClient()
	return blobClient.Delete(ctx, o)
}
// Undelete restores the contents and metadata of a soft-deleted blob and any
// associated soft-deleted snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob.
func (bb *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.UndeleteResponse, error) {
	blobClient := bb.BlobClient()
	return blobClient.Undelete(ctx, o)
}
// SetTier sets the tier on a blob. The operation is allowed on a page blob in a
// premium storage account and on a block blob in a blob storage account (locally
// redundant storage only). A premium page blob's tier determines the allowed size,
// IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive
// storage type. This operation does not update the blob's ETag.
// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
func (bb *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) {
	blobClient := bb.BlobClient()
	return blobClient.SetTier(ctx, tier, o)
}
// GetProperties returns the blob's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
func (bb *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) {
	blobClient := bb.BlobClient()
	return blobClient.GetProperties(ctx, o)
}
// SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (bb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) {
	blobClient := bb.BlobClient()
	return blobClient.SetHTTPHeaders(ctx, HTTPHeaders, o)
}
// SetMetadata changes a blob's metadata.
// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
func (bb *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) {
	blobClient := bb.BlobClient()
	return blobClient.SetMetadata(ctx, metadata, o)
}
// CreateSnapshot creates a read-only snapshot of a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob.
func (bb *Client) CreateSnapshot(ctx context.Context, o *blob.CreateSnapshotOptions) (blob.CreateSnapshotResponse, error) {
	blobClient := bb.BlobClient()
	return blobClient.CreateSnapshot(ctx, o)
}
// StartCopyFromURL begins an asynchronous copy of the data at the source URL to a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
func (bb *Client) StartCopyFromURL(ctx context.Context, copySource string, o *blob.StartCopyFromURLOptions) (blob.StartCopyFromURLResponse, error) {
	blobClient := bb.BlobClient()
	return blobClient.StartCopyFromURL(ctx, copySource, o)
}
// AbortCopyFromURL stops a pending copy that was previously started and leaves a
// destination blob with 0 length and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob.
func (bb *Client) AbortCopyFromURL(ctx context.Context, copyID string, o *blob.AbortCopyFromURLOptions) (blob.AbortCopyFromURLResponse, error) {
	blobClient := bb.BlobClient()
	return blobClient.AbortCopyFromURL(ctx, copyID, o)
}
// SetTags sets tags on a blob or a specific blob version (but not a snapshot).
// Each call replaces all existing tags attached to the blob; call with no tags
// set to remove all tags from the blob.
// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags
func (bb *Client) SetTags(ctx context.Context, tags map[string]string, o *blob.SetTagsOptions) (blob.SetTagsResponse, error) {
	blobClient := bb.BlobClient()
	return blobClient.SetTags(ctx, tags, o)
}
// GetTags retrieves the tags on a blob, a specific blob version, or a snapshot.
// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags
func (bb *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.GetTagsResponse, error) {
	blobClient := bb.BlobClient()
	return blobClient.GetTags(ctx, o)
}
// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url.
func (bb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) {
	blobClient := bb.BlobClient()
	return blobClient.CopyFromURL(ctx, copySource, o)
}
// Concurrent Upload Functions -----------------------------------------------------------------------------------------
// uploadFromReader uploads the contents of an io.ReaderAt to a block blob.
// Small payloads (<= MaxUploadBlobBytes) go up in a single Upload call; larger
// ones are split into blocks, staged concurrently via shared.DoBatchTransfer,
// and finalized with CommitBlockList.
func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, readerSize int64, o *uploadFromReaderOptions) (uploadFromReaderResponse, error) {
	if o.BlockSize == 0 {
		// If bufferSize > (MaxStageBlockBytes * MaxBlocks), then error
		if readerSize > MaxStageBlockBytes*MaxBlocks {
			return uploadFromReaderResponse{}, errors.New("buffer is too large to upload to a block blob")
		}
		// If bufferSize <= MaxUploadBlobBytes, then Upload should be used with just 1 I/O request
		if readerSize <= MaxUploadBlobBytes {
			o.BlockSize = MaxUploadBlobBytes // Default if unspecified
		} else {
			if remainder := readerSize % MaxBlocks; remainder > 0 {
				// ensure readerSize is a multiple of MaxBlocks
				readerSize += (MaxBlocks - remainder)
			}
			o.BlockSize = readerSize / MaxBlocks // buffer / max blocks = block size to use all 50,000 blocks
			if o.BlockSize < blob.DefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB
				o.BlockSize = blob.DefaultDownloadBlockSize
			}
			// StageBlock will be called with blockSize blocks and a Concurrency of (BufferSize / BlockSize).
		}
	}

	if readerSize <= MaxUploadBlobBytes {
		// If the size can fit in 1 Upload call, do it this way
		var body io.ReadSeeker = io.NewSectionReader(reader, 0, readerSize)
		if o.Progress != nil {
			// Wrap the body so the caller's Progress callback sees bytes as they are sent.
			body = streaming.NewRequestProgress(shared.NopCloser(body), o.Progress)
		}

		uploadBlockBlobOptions := o.getUploadBlockBlobOptions()
		resp, err := bb.Upload(ctx, shared.NopCloser(body), uploadBlockBlobOptions)

		return toUploadReaderAtResponseFromUploadResponse(resp), err
	}

	// NOTE(review): numBlocks is computed in uint16; if the true block count
	// exceeded 65535 this would wrap before the MaxBlocks check below — confirm
	// the BlockSize defaulting above always keeps the count within range.
	var numBlocks = uint16(((readerSize - 1) / o.BlockSize) + 1)
	if numBlocks > MaxBlocks {
		// prevent any math bugs from attempting to upload too many blocks which will always fail
		return uploadFromReaderResponse{}, errors.New("block limit exceeded")
	}

	blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs
	progress := int64(0)
	progressLock := &sync.Mutex{} // serializes updates to the aggregate progress counter

	err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{
		OperationName: "uploadFromReader",
		TransferSize:  readerSize,
		ChunkSize:     o.BlockSize,
		Concurrency:   o.Concurrency,
		Operation: func(offset int64, count int64, ctx context.Context) error {
			// This function is called once per block.
			// It is passed this block's offset within the buffer and its count of bytes
			// Prepare to read the proper block/section of the buffer
			var body io.ReadSeeker = io.NewSectionReader(reader, offset, count)
			blockNum := offset / o.BlockSize
			if o.Progress != nil {
				blockProgress := int64(0)
				body = streaming.NewRequestProgress(shared.NopCloser(body),
					func(bytesTransferred int64) {
						// Convert this block's absolute progress into a delta,
						// then fold it into the shared aggregate counter.
						diff := bytesTransferred - blockProgress
						blockProgress = bytesTransferred
						progressLock.Lock() // 1 goroutine at a time gets progress report
						progress += diff
						o.Progress(progress)
						progressLock.Unlock()
					})
			}

			// Block IDs are unique values to avoid issue if 2+ clients are uploading blocks
			// at the same time causing PutBlockList to get a mix of blocks from all the clients.
			generatedUuid, err := uuid.New()
			if err != nil {
				return err
			}
			blockIDList[blockNum] = base64.StdEncoding.EncodeToString([]byte(generatedUuid.String()))
			stageBlockOptions := o.getStageBlockOptions()
			_, err = bb.StageBlock(ctx, blockIDList[blockNum], shared.NopCloser(body), stageBlockOptions)
			return err
		},
	})
	if err != nil {
		return uploadFromReaderResponse{}, err
	}
	// All put blocks were successful, call Put Block List to finalize the blob
	commitBlockListOptions := o.getCommitBlockListOptions()
	resp, err := bb.CommitBlockList(ctx, blockIDList, commitBlockListOptions)

	return toUploadReaderAtResponseFromCommitBlockListResponse(resp), err
}
// UploadBuffer uploads a buffer in blocks to a block blob.
func (bb *Client) UploadBuffer(ctx context.Context, buffer []byte, o *UploadBufferOptions) (UploadBufferResponse, error) {
	// Dereference the caller's options into a private copy (zero value when nil)
	// so uploadFromReader can never mutate the caller's struct.
	var opts uploadFromReaderOptions
	if o != nil {
		opts = *o
	}
	reader := bytes.NewReader(buffer)
	return bb.uploadFromReader(ctx, reader, int64(len(buffer)), &opts)
}
// UploadFile uploads a file in blocks to a block blob.
func (bb *Client) UploadFile(ctx context.Context, file *os.File, o *UploadFileOptions) (UploadFileResponse, error) {
	// The file's current size determines how the upload is partitioned.
	info, err := file.Stat()
	if err != nil {
		return uploadFromReaderResponse{}, err
	}

	// Work on a private copy of the options (zero value when nil).
	var opts uploadFromReaderOptions
	if o != nil {
		opts = *o
	}
	return bb.uploadFromReader(ctx, file, info.Size(), &opts)
}
// UploadStream copies the file held in io.Reader to the Blob at blockBlobClient.
// A Context deadline or cancellation will cause this to error.
func (bb *Client) UploadStream(ctx context.Context, body io.Reader, o *UploadStreamOptions) (UploadStreamResponse, error) {
	// BUG FIX: normalize a nil options pointer BEFORE calling format(). The
	// previous order called o.format() first; its nil-receiver fast path returned
	// immediately, so the fresh UploadStreamOptions allocated below was never
	// formatted — leaving transferManager nil (and transferMangerNotSet false),
	// which breaks copyFromReader.
	if o == nil {
		o = &UploadStreamOptions{}
	}
	if err := o.format(); err != nil {
		return CommitBlockListResponse{}, err
	}

	// If we used the default manager, we need to close it.
	if o.transferMangerNotSet {
		defer o.transferManager.Close()
	}

	result, err := copyFromReader(ctx, body, bb, *o)
	if err != nil {
		return CommitBlockListResponse{}, err
	}

	return result, nil
}
// Concurrent Download Functions -----------------------------------------------------------------------------------------

// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
func (bb *Client) DownloadStream(ctx context.Context, o *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) {
	// Delegates to the generic blob.Client view of this block blob.
	return bb.BlobClient().DownloadStream(ctx, o)
}

// DownloadBuffer downloads an Azure blob into the supplied buffer using parallel range downloads.
func (bb *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *blob.DownloadBufferOptions) (int64, error) {
	// Wrap the caller's buffer in a WriterAt so concurrent chunks can be written at their offsets.
	return bb.BlobClient().DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o)
}

// DownloadFile downloads an Azure blob to a local file.
// The file would be truncated if the size doesn't match.
func (bb *Client) DownloadFile(ctx context.Context, file *os.File, o *blob.DownloadFileOptions) (int64, error) {
	return bb.BlobClient().DownloadFile(ctx, file, o)
}

View file

@ -0,0 +1,40 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blockblob
import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
// nolint
const (
	// CountToEnd specifies the end of the file
	CountToEnd = 0

	// _1MiB is one mebibyte; used as the default and minimum stream block size.
	_1MiB = 1024 * 1024

	// MaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload.
	MaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB

	// MaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
	MaxStageBlockBytes = 4000 * 1024 * 1024 // 4GB

	// MaxBlocks indicates the maximum number of blocks allowed in a block blob.
	MaxBlocks = 50000
)

// BlockListType defines values for BlockListType
type BlockListType = generated.BlockListType

// Re-exported BlockListType values from the generated layer.
const (
	BlockListTypeCommitted   BlockListType = generated.BlockListTypeCommitted
	BlockListTypeUncommitted BlockListType = generated.BlockListTypeUncommitted
	BlockListTypeAll         BlockListType = generated.BlockListTypeAll
)

// PossibleBlockListTypeValues returns the possible values for the BlockListType const type.
func PossibleBlockListTypeValues() []BlockListType {
	return generated.PossibleBlockListTypeValues()
}

View file

@ -0,0 +1,311 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blockblob
import (
"fmt"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
)
// Type Declarations ---------------------------------------------------------------------
// Block - Represents a single block in a block blob. It describes the block's ID and size.
type Block = generated.Block
// BlockList - type of blocklist (committed/uncommitted)
type BlockList = generated.BlockList
// Request Model Declaration -------------------------------------------------------------------------------------------
// UploadOptions contains the optional parameters for the Client.Upload method.
type UploadOptions struct {
	// Optional. Used to set blob tags in various blob operations.
	Tags map[string]string

	// Optional. Specifies a user-defined name-value pair associated with the blob.
	Metadata map[string]string

	// Optional. Indicates the tier to be set on the blob.
	Tier *blob.AccessTier

	// Specify the transactional md5 for the body, to be validated by the service.
	TransactionalContentMD5 []byte

	// HTTPHeaders specifies the standard HTTP header values to set on the blob.
	HTTPHeaders *blob.HTTPHeaders
	// CpkInfo carries customer-provided key (CPK) encryption parameters.
	CpkInfo *blob.CpkInfo
	// CpkScopeInfo carries the customer-provided encryption scope.
	CpkScopeInfo *blob.CpkScopeInfo
	// AccessConditions are the conditions (lease/modified-since/ETag) applied to the operation.
	AccessConditions *blob.AccessConditions
}
// format converts UploadOptions into the generated-layer option structs consumed
// by the generated Upload call. A nil receiver yields all-nil results.
func (o *UploadOptions) format() (*generated.BlockBlobClientUploadOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions,
	*generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) {
	if o == nil {
		return nil, nil, nil, nil, nil, nil
	}

	opts := &generated.BlockBlobClientUploadOptions{
		BlobTagsString:          shared.SerializeBlobTagsToStrPtr(o.Tags),
		Metadata:                o.Metadata,
		Tier:                    o.Tier,
		TransactionalContentMD5: o.TransactionalContentMD5,
	}
	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	return opts, o.HTTPHeaders, lease, o.CpkInfo, o.CpkScopeInfo, modified
}
// ---------------------------------------------------------------------------------------------------------------------
// StageBlockOptions contains the optional parameters for the Client.StageBlock method.
type StageBlockOptions struct {
	// CpkInfo carries customer-provided key (CPK) encryption parameters.
	CpkInfo *blob.CpkInfo

	// CpkScopeInfo carries the customer-provided encryption scope.
	CpkScopeInfo *blob.CpkScopeInfo

	// LeaseAccessConditions are the lease-based conditions applied to the operation.
	LeaseAccessConditions *blob.LeaseAccessConditions

	// Specify the transactional crc64 for the body, to be validated by the service.
	TransactionalContentCRC64 []byte

	// Specify the transactional md5 for the body, to be validated by the service.
	TransactionalContentMD5 []byte
}
// format converts StageBlockOptions into the generated-layer option structs
// consumed by the generated StageBlock call. (The previous comment here was a
// copy-paste of the struct's doc.) A nil receiver yields all-nil results.
func (o *StageBlockOptions) format() (*generated.BlockBlobClientStageBlockOptions, *generated.LeaseAccessConditions, *generated.CpkInfo, *generated.CpkScopeInfo) {
	if o == nil {
		return nil, nil, nil, nil
	}

	opts := &generated.BlockBlobClientStageBlockOptions{
		TransactionalContentCRC64: o.TransactionalContentCRC64,
		TransactionalContentMD5:   o.TransactionalContentMD5,
	}
	return opts, o.LeaseAccessConditions, o.CpkInfo, o.CpkScopeInfo
}
// ---------------------------------------------------------------------------------------------------------------------
// StageBlockFromURLOptions contains the optional parameters for the Client.StageBlockFromURL method.
type StageBlockFromURLOptions struct {
	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
	CopySourceAuthorization *string

	// LeaseAccessConditions are the lease-based conditions applied to the destination blob.
	LeaseAccessConditions *blob.LeaseAccessConditions
	// SourceModifiedAccessConditions are the conditions applied to the copy source.
	SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions

	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
	SourceContentMD5 []byte
	// Specify the crc64 calculated for the range of bytes that must be read from the copy source.
	SourceContentCRC64 []byte

	// Range specifies a range of bytes. The default value is all bytes.
	Range blob.HTTPRange

	// CpkInfo carries customer-provided key (CPK) encryption parameters.
	CpkInfo *blob.CpkInfo
	// CpkScopeInfo carries the customer-provided encryption scope.
	CpkScopeInfo *blob.CpkScopeInfo
}
// format converts StageBlockFromURLOptions into the generated-layer option
// structs consumed by the generated StageBlockFromURL call. A nil receiver
// yields all-nil results.
func (o *StageBlockFromURLOptions) format() (*generated.BlockBlobClientStageBlockFromURLOptions, *generated.CpkInfo, *generated.CpkScopeInfo, *generated.LeaseAccessConditions, *generated.SourceModifiedAccessConditions) {
	if o == nil {
		return nil, nil, nil, nil, nil
	}

	opts := &generated.BlockBlobClientStageBlockFromURLOptions{
		CopySourceAuthorization: o.CopySourceAuthorization,
		SourceContentMD5:        o.SourceContentMD5,
		SourceContentcrc64:      o.SourceContentCRC64,
		SourceRange:             exported.FormatHTTPRange(o.Range),
	}
	return opts, o.CpkInfo, o.CpkScopeInfo, o.LeaseAccessConditions, o.SourceModifiedAccessConditions
}
// ---------------------------------------------------------------------------------------------------------------------
// CommitBlockListOptions contains the optional parameters for Client.CommitBlockList method.
type CommitBlockListOptions struct {
	// Tags to set on the blob when the block list is committed.
	Tags map[string]string
	// Metadata to associate with the blob.
	Metadata map[string]string
	// RequestID is an optional client-generated request id (presumably echoed back by the
	// service for tracing — TODO confirm against the REST docs).
	RequestID *string
	// Tier is the access tier to set on the committed blob.
	Tier *blob.AccessTier
	// Timeout for the operation (presumably in seconds, per other service timeouts — TODO confirm).
	Timeout *int32
	// Specify the transactional crc64 for the body, to be validated by the service.
	TransactionalContentCRC64 []byte
	// Specify the transactional md5 for the body, to be validated by the service.
	TransactionalContentMD5 []byte
	// HTTPHeaders specifies the standard HTTP header values to set on the blob.
	HTTPHeaders *blob.HTTPHeaders
	// CpkInfo carries customer-provided key (CPK) encryption parameters.
	CpkInfo *blob.CpkInfo
	// CpkScopeInfo carries the customer-provided encryption scope.
	CpkScopeInfo *blob.CpkScopeInfo
	// AccessConditions are the conditions applied to the commit operation.
	AccessConditions *blob.AccessConditions
}
// ---------------------------------------------------------------------------------------------------------------------
// GetBlockListOptions contains the optional parameters for the Client.GetBlockList method.
type GetBlockListOptions struct {
	// Snapshot selects a specific snapshot of the blob to query.
	Snapshot *string
	// AccessConditions are the conditions applied to the operation.
	AccessConditions *blob.AccessConditions
}
// format converts GetBlockListOptions into the generated-layer option structs
// consumed by the generated GetBlockList call. A nil receiver yields all-nil results.
func (o *GetBlockListOptions) format() (*generated.BlockBlobClientGetBlockListOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
	if o == nil {
		return nil, nil, nil
	}

	lease, modified := exported.FormatBlobAccessConditions(o.AccessConditions)
	opts := &generated.BlockBlobClientGetBlockListOptions{Snapshot: o.Snapshot}
	return opts, lease, modified
}
// ------------------------------------------------------------
// uploadFromReaderOptions identifies options used by the UploadBuffer and UploadFile functions.
type uploadFromReaderOptions struct {
	// BlockSize specifies the block size to use; the default (and maximum size) is MaxStageBlockBytes.
	BlockSize int64

	// Progress is a function that is invoked periodically as bytes are sent to the BlockBlobClient.
	// Note that the progress reporting is not always increasing; it can go down when retrying a request.
	Progress func(bytesTransferred int64)

	// HTTPHeaders indicates the HTTP headers to be associated with the blob.
	HTTPHeaders *blob.HTTPHeaders

	// Metadata indicates the metadata to be associated with the blob when PutBlockList is called.
	Metadata map[string]string

	// AccessConditions indicates the access conditions for the block blob.
	AccessConditions *blob.AccessConditions

	// AccessTier indicates the tier of blob
	AccessTier *blob.AccessTier

	// Tags are the blob tags to set when the upload completes.
	Tags map[string]string

	// ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data.
	CpkInfo      *blob.CpkInfo
	CpkScopeInfo *blob.CpkScopeInfo

	// Concurrency indicates the maximum number of blocks to upload in parallel (0=default)
	Concurrency uint16

	// Optional header, Specifies the transactional crc64 for the body, to be validated by the service.
	TransactionalContentCRC64 *[]byte

	// Specify the transactional md5 for the body, to be validated by the service.
	TransactionalContentMD5 *[]byte
}

// UploadBufferOptions provides set of configurations for UploadBuffer operation
type UploadBufferOptions = uploadFromReaderOptions

// UploadFileOptions provides set of configurations for UploadFile operation
type UploadFileOptions = uploadFromReaderOptions
// getStageBlockOptions derives the per-block StageBlock options from the
// reader-upload options; only the lease portion of AccessConditions applies here.
func (o *uploadFromReaderOptions) getStageBlockOptions() *StageBlockOptions {
	lease, _ := exported.FormatBlobAccessConditions(o.AccessConditions)
	opts := &StageBlockOptions{
		CpkInfo:               o.CpkInfo,
		CpkScopeInfo:          o.CpkScopeInfo,
		LeaseAccessConditions: lease,
	}
	return opts
}
// getUploadBlockBlobOptions derives the single-shot Upload options from the
// reader-upload options (used when the payload fits in one Upload call).
func (o *uploadFromReaderOptions) getUploadBlockBlobOptions() *UploadOptions {
	opts := &UploadOptions{
		Tags:             o.Tags,
		Metadata:         o.Metadata,
		Tier:             o.AccessTier,
		HTTPHeaders:      o.HTTPHeaders,
		AccessConditions: o.AccessConditions,
		CpkInfo:          o.CpkInfo,
		CpkScopeInfo:     o.CpkScopeInfo,
	}
	return opts
}
// getCommitBlockListOptions derives the CommitBlockList options used to
// finalize a multi-block upload from the reader-upload options.
func (o *uploadFromReaderOptions) getCommitBlockListOptions() *CommitBlockListOptions {
	opts := &CommitBlockListOptions{
		Tags:         o.Tags,
		Metadata:     o.Metadata,
		Tier:         o.AccessTier,
		HTTPHeaders:  o.HTTPHeaders,
		CpkInfo:      o.CpkInfo,
		CpkScopeInfo: o.CpkScopeInfo,
	}
	return opts
}
// ---------------------------------------------------------------------------------------------------------------------
// UploadStreamOptions provides set of configurations for UploadStream operation
type UploadStreamOptions struct {
	// transferManager provides a transferManager that controls buffer allocation/reuse and
	// concurrency. This overrides BlockSize and MaxConcurrency if set.
	transferManager shared.TransferManager
	// transferMangerNotSet records that format() installed the default manager, so
	// UploadStream knows it must Close() it. NOTE(review): the field name misspells
	// "Manager"; kept as-is since renaming would touch other files in this package.
	transferMangerNotSet bool

	// BlockSize defines the size of the buffer used during upload. The default and minimum value is 1 MiB.
	BlockSize int

	// Concurrency defines the number of concurrent uploads to be performed to upload the file.
	// Each concurrent upload will create a buffer of size BlockSize. The default value is one.
	Concurrency int

	// HTTPHeaders specifies the standard HTTP header values to set on the blob.
	HTTPHeaders *blob.HTTPHeaders
	// Metadata to associate with the blob.
	Metadata map[string]string
	// AccessConditions are the conditions applied to the commit of the upload.
	AccessConditions *blob.AccessConditions
	// AccessTier indicates the tier to be set on the blob.
	AccessTier *blob.AccessTier
	// Tags to set on the blob.
	Tags map[string]string
	// CpkInfo carries customer-provided key (CPK) encryption parameters.
	CpkInfo *blob.CpkInfo
	// CpkScopeInfo carries the customer-provided encryption scope.
	CpkScopeInfo *blob.CpkScopeInfo
}
// format applies defaults and installs the default static-buffer transfer
// manager when the caller did not supply one. A nil receiver, or options that
// already carry a transferManager, are left untouched.
func (u *UploadStreamOptions) format() error {
	if u == nil || u.transferManager != nil {
		return nil
	}

	// Apply defaults: at least one concurrent upload, blocks of at least 1 MiB.
	if u.Concurrency == 0 {
		u.Concurrency = 1
	}
	if u.BlockSize < _1MiB {
		u.BlockSize = _1MiB
	}

	tm, err := shared.NewStaticBuffer(u.BlockSize, u.Concurrency)
	if err != nil {
		return fmt.Errorf("bug: default transfer manager could not be created: %s", err)
	}
	u.transferManager = tm
	// Remember that we own this manager so UploadStream can close it.
	u.transferMangerNotSet = true
	return nil
}
// getStageBlockOptions derives the per-block StageBlock options from the
// stream options; only the lease portion of AccessConditions applies here.
func (u *UploadStreamOptions) getStageBlockOptions() *StageBlockOptions {
	lease, _ := exported.FormatBlobAccessConditions(u.AccessConditions)
	return &StageBlockOptions{
		CpkInfo:               u.CpkInfo,
		CpkScopeInfo:          u.CpkScopeInfo,
		LeaseAccessConditions: lease,
	}
}
// getCommitBlockListOptions derives the CommitBlockList options used to
// finalize a streamed upload from the stream options.
func (u *UploadStreamOptions) getCommitBlockListOptions() *CommitBlockListOptions {
	return &CommitBlockListOptions{
		Tags:             u.Tags,
		Metadata:         u.Metadata,
		Tier:             u.AccessTier,
		HTTPHeaders:      u.HTTPHeaders,
		CpkInfo:          u.CpkInfo,
		CpkScopeInfo:     u.CpkScopeInfo,
		AccessConditions: u.AccessConditions,
	}
}

View file

@ -0,0 +1,111 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package blockblob
import (
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
// UploadResponse contains the response from method Client.Upload.
type UploadResponse = generated.BlockBlobClientUploadResponse
// StageBlockResponse contains the response from method Client.StageBlock.
type StageBlockResponse = generated.BlockBlobClientStageBlockResponse
// CommitBlockListResponse contains the response from method Client.CommitBlockList.
type CommitBlockListResponse = generated.BlockBlobClientCommitBlockListResponse
// StageBlockFromURLResponse contains the response from method Client.StageBlockFromURL.
type StageBlockFromURLResponse = generated.BlockBlobClientStageBlockFromURLResponse
// GetBlockListResponse contains the response from method Client.GetBlockList.
type GetBlockListResponse = generated.BlockBlobClientGetBlockListResponse
// uploadFromReaderResponse contains the response from method Client.UploadBuffer/Client.UploadFile.
type uploadFromReaderResponse struct {
// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
ClientRequestID *string
// ContentMD5 contains the information returned from the Content-MD5 header response.
ContentMD5 []byte
// Date contains the information returned from the Date header response.
Date *time.Time
// ETag contains the information returned from the ETag header response.
ETag *azcore.ETag
// EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
EncryptionKeySHA256 *string
// EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
EncryptionScope *string
// IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
IsServerEncrypted *bool
// LastModified contains the information returned from the Last-Modified header response.
LastModified *time.Time
// RequestID contains the information returned from the x-ms-request-id header response.
RequestID *string
// Version contains the information returned from the x-ms-version header response.
Version *string
// VersionID contains the information returned from the x-ms-version-id header response.
VersionID *string
// ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
// Will be a part of response only if uploading data >= internal.MaxUploadBlobBytes (= 256 * 1024 * 1024 // 256MB)
ContentCRC64 []byte
}
// toUploadReaderAtResponseFromUploadResponse maps a single-shot Upload response
// onto the common uploadFromReaderResponse shape. ContentCRC64 is left unset;
// the Upload response carries no x-ms-content-crc64 field here.
func toUploadReaderAtResponseFromUploadResponse(resp UploadResponse) uploadFromReaderResponse {
	var out uploadFromReaderResponse
	out.ClientRequestID = resp.ClientRequestID
	out.ContentMD5 = resp.ContentMD5
	out.Date = resp.Date
	out.ETag = resp.ETag
	out.EncryptionKeySHA256 = resp.EncryptionKeySHA256
	out.EncryptionScope = resp.EncryptionScope
	out.IsServerEncrypted = resp.IsServerEncrypted
	out.LastModified = resp.LastModified
	out.RequestID = resp.RequestID
	out.Version = resp.Version
	out.VersionID = resp.VersionID
	return out
}
// toUploadReaderAtResponseFromCommitBlockListResponse maps a CommitBlockList
// response onto the common uploadFromReaderResponse shape, including the
// x-ms-content-crc64 value that only this path provides.
func toUploadReaderAtResponseFromCommitBlockListResponse(resp CommitBlockListResponse) uploadFromReaderResponse {
	var out uploadFromReaderResponse
	out.ClientRequestID = resp.ClientRequestID
	out.ContentMD5 = resp.ContentMD5
	out.Date = resp.Date
	out.ETag = resp.ETag
	out.EncryptionKeySHA256 = resp.EncryptionKeySHA256
	out.EncryptionScope = resp.EncryptionScope
	out.IsServerEncrypted = resp.IsServerEncrypted
	out.LastModified = resp.LastModified
	out.RequestID = resp.RequestID
	out.Version = resp.Version
	out.VersionID = resp.VersionID
	out.ContentCRC64 = resp.XMSContentCRC64
	return out
}
// UploadFileResponse contains the response from method Client.UploadBuffer/Client.UploadFile.
type UploadFileResponse = uploadFromReaderResponse
// UploadBufferResponse contains the response from method Client.UploadBuffer/Client.UploadFile.
type UploadBufferResponse = uploadFromReaderResponse
// UploadStreamResponse contains the response from method Client.CommitBlockList.
type UploadStreamResponse = CommitBlockListResponse

View file

@ -0,0 +1,160 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package azblob
import (
"context"
"io"
"os"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
)
// ClientOptions contains the optional parameters when creating a Client.
type ClientOptions struct {
	azcore.ClientOptions
}

// Client represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
type Client struct {
	// svc is the underlying service-level client; every method on Client delegates through it.
	svc *service.Client
}
// NewClient creates a BlobClient object using the specified URL, Azure AD credential, and options.
func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
	// Translate the azblob-level options into service-level options; nil stays nil.
	var svcOpts *service.ClientOptions
	if options != nil {
		svcOpts = &service.ClientOptions{ClientOptions: options.ClientOptions}
	}

	svc, err := service.NewClient(serviceURL, cred, svcOpts)
	if err != nil {
		return nil, err
	}
	return &Client{svc: svc}, nil
}
// NewClientWithNoCredential creates a BlobClient object using the specified URL and options.
func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) {
	// Translate the azblob-level options into service-level options; nil stays nil.
	var svcOpts *service.ClientOptions
	if options != nil {
		svcOpts = &service.ClientOptions{ClientOptions: options.ClientOptions}
	}

	svc, err := service.NewClientWithNoCredential(serviceURL, svcOpts)
	if err != nil {
		return nil, err
	}
	return &Client{svc: svc}, nil
}
// NewClientWithSharedKeyCredential creates a BlobClient object using the specified URL, shared key, and options.
func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
	// ClientOptions and service.ClientOptions share an identical layout, so a
	// direct pointer conversion is safe here.
	svc, err := service.NewClientWithSharedKeyCredential(serviceURL, cred, (*service.ClientOptions)(options))
	if err != nil {
		return nil, err
	}
	return &Client{svc: svc}, nil
}
// NewClientFromConnectionString creates BlobClient from a connection String
func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) {
	if options == nil {
		options = &ClientOptions{}
	}

	// svcClient is a service-level client for the whole account (not a container client).
	svcClient, err := service.NewClientFromConnectionString(connectionString, (*service.ClientOptions)(options))
	if err != nil {
		return nil, err
	}
	return &Client{svc: svcClient}, nil
}
// URL returns the URL endpoint used by the BlobClient object.
func (c *Client) URL() string {
	return c.svc.URL()
}

// CreateContainer is a lifecycle method that creates a new container under the specified account.
// If the container with the same name already exists, a ResourceExistsError will be raised.
// This method returns a client with which to interact with the newly created container.
func (c *Client) CreateContainer(ctx context.Context, containerName string, o *CreateContainerOptions) (CreateContainerResponse, error) {
	return c.svc.CreateContainer(ctx, containerName, o)
}

// DeleteContainer is a lifecycle method that marks the specified container for deletion.
// The container and any blobs contained within it are later deleted during garbage collection.
// If the container is not found, a ResourceNotFoundError will be raised.
func (c *Client) DeleteContainer(ctx context.Context, containerName string, o *DeleteContainerOptions) (DeleteContainerResponse, error) {
	return c.svc.DeleteContainer(ctx, containerName, o)
}

// DeleteBlob marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
// Note that deleting a blob also deletes all its snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
func (c *Client) DeleteBlob(ctx context.Context, containerName string, blobName string, o *DeleteBlobOptions) (DeleteBlobResponse, error) {
	return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).Delete(ctx, o)
}

// NewListBlobsFlatPager returns a pager for blobs starting from the specified Marker. Use an empty
// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
func (c *Client) NewListBlobsFlatPager(containerName string, o *ListBlobsFlatOptions) *runtime.Pager[ListBlobsFlatResponse] {
	return c.svc.NewContainerClient(containerName).NewListBlobsFlatPager(o)
}

// NewListContainersPager operation returns a pager of the containers under the specified account.
// Use an empty Marker to start enumeration from the beginning. Container names are returned in lexicographic order.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-containers2.
func (c *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager[ListContainersResponse] {
	return c.svc.NewListContainersPager(o)
}

// UploadBuffer uploads a buffer in blocks to a block blob.
func (c *Client) UploadBuffer(ctx context.Context, containerName string, blobName string, buffer []byte, o *UploadBufferOptions) (UploadBufferResponse, error) {
	// Delegates to the container/blockblob-level client for the named blob.
	return c.svc.NewContainerClient(containerName).NewBlockBlobClient(blobName).UploadBuffer(ctx, buffer, o)
}

// UploadFile uploads a file in blocks to a block blob.
func (c *Client) UploadFile(ctx context.Context, containerName string, blobName string, file *os.File, o *UploadFileOptions) (UploadFileResponse, error) {
	return c.svc.NewContainerClient(containerName).NewBlockBlobClient(blobName).UploadFile(ctx, file, o)
}

// UploadStream copies the file held in io.Reader to the Blob at blockBlobClient.
// A Context deadline or cancellation will cause this to error.
func (c *Client) UploadStream(ctx context.Context, containerName string, blobName string, body io.Reader, o *UploadStreamOptions) (UploadStreamResponse, error) {
	return c.svc.NewContainerClient(containerName).NewBlockBlobClient(blobName).UploadStream(ctx, body, o)
}

// DownloadBuffer downloads an Azure blob into the supplied buffer using parallel range downloads.
func (c *Client) DownloadBuffer(ctx context.Context, containerName string, blobName string, buffer []byte, o *DownloadBufferOptions) (int64, error) {
	// Wrap the caller's buffer in a WriterAt so concurrent chunks can be written at their offsets.
	return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).DownloadBuffer(ctx, shared.NewBytesWriter(buffer), o)
}

// DownloadFile downloads an Azure blob to a local file.
// The file would be truncated if the size doesn't match.
func (c *Client) DownloadFile(ctx context.Context, containerName string, blobName string, file *os.File, o *DownloadFileOptions) (int64, error) {
	return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).DownloadFile(ctx, file, o)
}

// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
func (c *Client) DownloadStream(ctx context.Context, containerName string, blobName string, o *DownloadStreamOptions) (DownloadStreamResponse, error) {
	// Copy the options so the call below never mutates the caller's struct.
	o = shared.CopyOptions(o)
	return c.svc.NewContainerClient(containerName).NewBlobClient(blobName).DownloadStream(ctx, o)
}

View file

@ -0,0 +1,36 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package azblob
import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)
// SharedKeyCredential contains an account's name and its primary or secondary key.
type SharedKeyCredential = exported.SharedKeyCredential

// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
// storage account's name and either its primary or secondary key.
func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) {
	// Thin re-export so callers never import the internal/exported package directly.
	return exported.NewSharedKeyCredential(accountName, accountKey)
}

// URLParts object represents the components that make up an Azure Storage Container/Blob URL.
// NOTE: Changing any SAS-related field requires computing a new SAS signature.
type URLParts = sas.URLParts

// ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters. Any other
// query parameters remain in the UnparsedParams field. This method overwrites all fields in the URLParts object.
func ParseURL(u string) (URLParts, error) {
	return sas.ParseURL(u)
}

// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and
// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange
// which has an offset but no zero value count indicates from the offset to the resource's end.
type HTTPRange = exported.HTTPRange

View file

@ -1,39 +0,0 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
package azblob
import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
// connection bundles a service endpoint URL with the HTTP pipeline used to reach it.
type connection struct {
	u string           // service endpoint URL
	p runtime.Pipeline // request pipeline (policies + transport)
}

// newConnection creates an instance of the connection type with the specified endpoint.
// Pass nil to accept the default options; this is the same as passing a zero-value options.
func newConnection(endpoint string, options *azcore.ClientOptions) *connection {
	// Copy the caller's options so the pipeline never shares mutable state with them.
	cp := azcore.ClientOptions{}
	if options != nil {
		cp = *options
	}
	// moduleName/moduleVersion are package-level values defined elsewhere in this package.
	return &connection{u: endpoint, p: runtime.NewPipeline(moduleName, moduleVersion, runtime.PipelineOptions{}, &cp)}
}

// Endpoint returns the connection's endpoint.
func (c *connection) Endpoint() string {
	return c.u
}

// Pipeline returns the connection's pipeline.
func (c *connection) Pipeline() runtime.Pipeline {
	return c.p
}

View file

@ -2,45 +2,36 @@
// +build go1.18 // +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved. // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. // Licensed under the MIT License. See License.txt in the project root for license information.
package azblob package azblob
var SASVersion = "2019-12-12" import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
//nolint
const (
// BlockBlobMaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload.
BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB
// BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock.
BlockBlobMaxStageBlockBytes = 4000 * 1024 * 1024 // 4GB
// BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob.
BlockBlobMaxBlocks = 50000
// PageBlobPageBytes indicates the number of bytes in a page (512).
PageBlobPageBytes = 512
// BlobDefaultDownloadBlockSize is default block size
BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB
) )
const ( // PublicAccessType defines values for AccessType - private (default) or blob or container
headerAuthorization = "Authorization" type PublicAccessType = generated.PublicAccessType
headerXmsDate = "x-ms-date"
headerContentLength = "Content-Length"
headerContentEncoding = "Content-Encoding"
headerContentLanguage = "Content-Language"
headerContentType = "Content-Type"
headerContentMD5 = "Content-MD5"
headerIfModifiedSince = "If-Modified-Since"
headerIfMatch = "If-Match"
headerIfNoneMatch = "If-None-Match"
headerIfUnmodifiedSince = "If-Unmodified-Since"
headerRange = "Range"
)
const ( const (
tokenScope = "https://storage.azure.com/.default" PublicAccessTypeBlob PublicAccessType = generated.PublicAccessTypeBlob
PublicAccessTypeContainer PublicAccessType = generated.PublicAccessTypeContainer
) )
// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type.
func PossiblePublicAccessTypeValues() []PublicAccessType {
	return generated.PossiblePublicAccessTypeValues()
}

// DeleteSnapshotsOptionType defines values for DeleteSnapshotsOptionType
type DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionType

// Re-exported DeleteSnapshotsOptionType values from the generated layer
// (the values map to the service's x-ms-delete-snapshots header).
const (
	DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeInclude
	DeleteSnapshotsOptionTypeOnly    DeleteSnapshotsOptionType = generated.DeleteSnapshotsOptionTypeOnly
)

// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type.
func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType {
	return generated.PossibleDeleteSnapshotsOptionTypeValues()
}

View file

@ -0,0 +1,327 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package container
import (
"context"
"errors"
"net/http"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)
// ClientOptions contains the optional parameters when creating a Client.
type ClientOptions struct {
	azcore.ClientOptions
}

// Client represents a URL to the Azure Storage container allowing you to manipulate its blobs.
// It is a defined type (not an alias) over base.Client so this package can attach
// container-specific methods while reusing the shared client plumbing.
type Client base.Client[generated.ContainerClient]
// NewClient creates a Client object using the specified URL, Azure AD credential, and options.
func NewClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
	// Authenticate every (re)try of a request with a bearer token for the storage scope.
	tokenPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
	opts := shared.GetClientOptions(options)
	opts.PerRetryPolicies = append(opts.PerRetryPolicies, tokenPolicy)
	pipeline := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &opts.ClientOptions)
	inner := base.NewContainerClient(containerURL, pipeline, nil)
	return (*Client)(inner), nil
}
// NewClientWithNoCredential creates a Client object using the specified URL and options.
// No authentication policy is installed on the pipeline.
func NewClientWithNoCredential(containerURL string, options *ClientOptions) (*Client, error) {
	opts := shared.GetClientOptions(options)
	pipeline := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &opts.ClientOptions)
	inner := base.NewContainerClient(containerURL, pipeline, nil)
	return (*Client)(inner), nil
}
// NewClientWithSharedKeyCredential creates a Client object using the specified URL, shared key, and options.
// The credential is also retained on the client so SAS-signing helpers can use it.
func NewClientWithSharedKeyCredential(containerURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
	keyPolicy := exported.NewSharedKeyCredPolicy(cred)
	opts := shared.GetClientOptions(options)
	opts.PerRetryPolicies = append(opts.PerRetryPolicies, keyPolicy)
	pipeline := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &opts.ClientOptions)
	inner := base.NewContainerClient(containerURL, pipeline, cred)
	return (*Client)(inner), nil
}
// NewClientFromConnectionString creates a Client object using connection string of an account.
// When the connection string carries both an account name and key, the client is
// built with a SharedKeyCredential; otherwise no credential is attached.
func NewClientFromConnectionString(connectionString string, containerName string, options *ClientOptions) (*Client, error) {
	parsed, err := shared.ParseConnectionString(connectionString)
	if err != nil {
		return nil, err
	}
	parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName)
	if parsed.AccountKey == "" || parsed.AccountName == "" {
		return NewClientWithNoCredential(parsed.ServiceURL, options)
	}
	credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey)
	if err != nil {
		return nil, err
	}
	return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options)
}
// generated returns the underlying generated-layer container client backing c.
func (c *Client) generated() *generated.ContainerClient {
	return base.InnerClient((*base.Client[generated.ContainerClient])(c))
}

// sharedKey returns the SharedKeyCredential the client was constructed with,
// or nil if the client was built without one.
func (c *Client) sharedKey() *SharedKeyCredential {
	return base.SharedKey((*base.Client[generated.ContainerClient])(c))
}

// URL returns the URL endpoint used by the Client object.
func (c *Client) URL() string {
	return c.generated().Endpoint()
}
// NewBlobClient creates a new blob.Client object by concatenating blobName to the end of
// this Client's URL. The new client reuses this Client's request pipeline and
// shared-key credential (if any).
func (c *Client) NewBlobClient(blobName string) *blob.Client {
	blobURL := runtime.JoinPaths(c.URL(), blobName)
	return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey()))
}

// NewAppendBlobClient creates a new appendblob.Client object by concatenating blobName to
// the end of this Client's URL. The new client reuses this Client's request pipeline and
// shared-key credential (if any).
func (c *Client) NewAppendBlobClient(blobName string) *appendblob.Client {
	blobURL := runtime.JoinPaths(c.URL(), blobName)
	return (*appendblob.Client)(base.NewAppendBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey()))
}

// NewBlockBlobClient creates a new blockblob.Client object by concatenating blobName to
// the end of this Client's URL. The new client reuses this Client's request pipeline and
// shared-key credential (if any).
func (c *Client) NewBlockBlobClient(blobName string) *blockblob.Client {
	blobURL := runtime.JoinPaths(c.URL(), blobName)
	return (*blockblob.Client)(base.NewBlockBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey()))
}

// NewPageBlobClient creates a new pageblob.Client object by concatenating blobName to
// the end of this Client's URL. The new client reuses this Client's request pipeline and
// shared-key credential (if any).
func (c *Client) NewPageBlobClient(blobName string) *pageblob.Client {
	blobURL := runtime.JoinPaths(c.URL(), blobName)
	return (*pageblob.Client)(base.NewPageBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey()))
}
// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container.
func (c *Client) Create(ctx context.Context, options *CreateOptions) (CreateResponse, error) {
	var createOpts *generated.ContainerClientCreateOptions
	var cpkScope *generated.ContainerCpkScopeInfo
	if options != nil {
		createOpts = &generated.ContainerClientCreateOptions{
			Access:   options.Access,
			Metadata: options.Metadata,
		}
		cpkScope = options.CpkScopeInfo
	}
	return c.generated().Create(ctx, createOpts, cpkScope)
}
// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container.
func (c *Client) Delete(ctx context.Context, options *DeleteOptions) (DeleteResponse, error) {
	deleteOpts, lease, modified := options.format()
	return c.generated().Delete(ctx, deleteOpts, lease, modified)
}
// Restore operation restores the contents and properties of a soft deleted container to a specified container.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/restore-container.
func (c *Client) Restore(ctx context.Context, deletedContainerVersion string, options *RestoreOptions) (RestoreResponse, error) {
	// The container name is derived from the client's own URL.
	urlParts, err := blob.ParseURL(c.URL())
	if err != nil {
		return RestoreResponse{}, err
	}
	restoreOpts := generated.ContainerClientRestoreOptions{
		DeletedContainerName:    &urlParts.ContainerName,
		DeletedContainerVersion: &deletedContainerVersion,
	}
	return c.generated().Restore(ctx, &restoreOpts)
}
// GetProperties returns the container's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata.
func (c *Client) GetProperties(ctx context.Context, o *GetPropertiesOptions) (GetPropertiesResponse, error) {
	// The service's GetProperties call returns the metadata AND the properties,
	// so this single method covers both (no separate GetMetadata is exposed).
	getOpts, lease := o.format()
	return c.generated().GetProperties(ctx, getOpts, lease)
}

// SetMetadata sets the container's metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata.
func (c *Client) SetMetadata(ctx context.Context, o *SetMetadataOptions) (SetMetadataResponse, error) {
	setOpts, lease, modified := o.format()
	return c.generated().SetMetadata(ctx, setOpts, lease, modified)
}

// GetAccessPolicy returns the container's access policy. The access policy indicates whether container's blobs may be accessed publicly.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl.
func (c *Client) GetAccessPolicy(ctx context.Context, o *GetAccessPolicyOptions) (GetAccessPolicyResponse, error) {
	getOpts, lease := o.format()
	return c.generated().GetAccessPolicy(ctx, getOpts, lease)
}

// SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl.
func (c *Client) SetAccessPolicy(ctx context.Context, containerACL []*SignedIdentifier, o *SetAccessPolicyOptions) (SetAccessPolicyResponse, error) {
	// format returns (options, leaseConditions, modifiedConditions) in that order.
	setOpts, lease, modified := o.format()
	return c.generated().SetAccessPolicy(ctx, containerACL, setOpts, lease, modified)
}
// NewListBlobsFlatPager returns a pager for blobs starting from the specified Marker. Use an empty
// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
func (c *Client) NewListBlobsFlatPager(o *ListBlobsFlatOptions) *runtime.Pager[ListBlobsFlatResponse] {
	listOptions := generated.ContainerClientListBlobFlatSegmentOptions{}
	if o != nil {
		listOptions.Include = o.Include.format()
		listOptions.Marker = o.Marker
		listOptions.Maxresults = o.MaxResults
		listOptions.Prefix = o.Prefix
	}
	return runtime.NewPager(runtime.PagingHandler[ListBlobsFlatResponse]{
		More: func(page ListBlobsFlatResponse) bool {
			// More pages exist while the service returns a non-empty continuation marker.
			return page.NextMarker != nil && len(*page.NextMarker) > 0
		},
		Fetcher: func(ctx context.Context, page *ListBlobsFlatResponse) (ListBlobsFlatResponse, error) {
			// A nil page means the first fetch; otherwise continue from the previous page's marker.
			if page != nil {
				listOptions.Marker = page.NextMarker
			}
			req, err := c.generated().ListBlobFlatSegmentCreateRequest(ctx, &listOptions)
			if err != nil {
				return ListBlobsFlatResponse{}, err
			}
			resp, err := c.generated().Pipeline().Do(req)
			if err != nil {
				return ListBlobsFlatResponse{}, err
			}
			if !runtime.HasStatusCode(resp, http.StatusOK) {
				// TODO: surface a richer storage error type once available.
				return ListBlobsFlatResponse{}, runtime.NewResponseError(resp)
			}
			return c.generated().ListBlobFlatSegmentHandleResponse(resp)
		},
	})
}
// NewListBlobsHierarchyPager returns a pager for blobs starting from the specified Marker,
// with results grouped by the given delimiter. Use an empty Marker to start enumeration
// from the beginning. Blob names are returned in lexicographic order.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
func (c *Client) NewListBlobsHierarchyPager(delimiter string, o *ListBlobsHierarchyOptions) *runtime.Pager[ListBlobsHierarchyResponse] {
	listOptions := o.format()
	return runtime.NewPager(runtime.PagingHandler[ListBlobsHierarchyResponse]{
		More: func(page ListBlobsHierarchyResponse) bool {
			// More pages exist while the service returns a non-empty continuation marker.
			return page.NextMarker != nil && len(*page.NextMarker) > 0
		},
		Fetcher: func(ctx context.Context, page *ListBlobsHierarchyResponse) (ListBlobsHierarchyResponse, error) {
			// A nil page means the first fetch; otherwise continue from the previous page's marker.
			if page != nil {
				listOptions.Marker = page.NextMarker
			}
			req, err := c.generated().ListBlobHierarchySegmentCreateRequest(ctx, delimiter, &listOptions)
			if err != nil {
				return ListBlobsHierarchyResponse{}, err
			}
			resp, err := c.generated().Pipeline().Do(req)
			if err != nil {
				return ListBlobsHierarchyResponse{}, err
			}
			if !runtime.HasStatusCode(resp, http.StatusOK) {
				return ListBlobsHierarchyResponse{}, runtime.NewResponseError(resp)
			}
			return c.generated().ListBlobHierarchySegmentHandleResponse(resp)
		},
	})
}
// GetSASURL is a convenience method for generating a SAS token for the currently pointed at container.
// It can only be used if the credential supplied during creation was a SharedKeyCredential.
func (c *Client) GetSASURL(permissions sas.ContainerPermissions, start time.Time, expiry time.Time) (string, error) {
	sharedKey := c.sharedKey()
	if sharedKey == nil {
		return "", errors.New("SAS can only be signed with a SharedKeyCredential")
	}
	urlParts, err := blob.ParseURL(c.URL())
	if err != nil {
		return "", err
	}
	// Containers do not have snapshots, nor versions.
	values := sas.BlobSignatureValues{
		Version:       sas.Version,
		Protocol:      sas.ProtocolHTTPS,
		ContainerName: urlParts.ContainerName,
		Permissions:   permissions.String(),
		StartTime:     start.UTC(),
		ExpiryTime:    expiry.UTC(),
	}
	qps, err := values.SignWithSharedKey(sharedKey)
	if err != nil {
		return "", err
	}
	endpoint := c.URL()
	if strings.HasSuffix(endpoint, "/") {
		return endpoint + "?" + qps.Encode(), nil
	}
	return endpoint + "/?" + qps.Encode(), nil
}

View file

@ -0,0 +1,166 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package container
import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
// PublicAccessType defines values for AccessType - private (default) or blob or container
type PublicAccessType = generated.PublicAccessType

// Re-exported PublicAccessType values from the generated layer.
const (
	PublicAccessTypeBlob      PublicAccessType = generated.PublicAccessTypeBlob
	PublicAccessTypeContainer PublicAccessType = generated.PublicAccessTypeContainer
)

// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type.
func PossiblePublicAccessTypeValues() []PublicAccessType {
	return generated.PossiblePublicAccessTypeValues()
}

// SKUName defines values for SkuName - LRS, GRS, RAGRS, ZRS, Premium LRS
type SKUName = generated.SKUName

// Re-exported SKUName values from the generated layer.
const (
	SKUNameStandardLRS   SKUName = generated.SKUNameStandardLRS
	SKUNameStandardGRS   SKUName = generated.SKUNameStandardGRS
	SKUNameStandardRAGRS SKUName = generated.SKUNameStandardRAGRS
	SKUNameStandardZRS   SKUName = generated.SKUNameStandardZRS
	SKUNamePremiumLRS    SKUName = generated.SKUNamePremiumLRS
)

// PossibleSKUNameValues returns the possible values for the SKUName const type.
func PossibleSKUNameValues() []SKUName {
	return generated.PossibleSKUNameValues()
}

// AccountKind defines values for AccountKind
type AccountKind = generated.AccountKind

// Re-exported AccountKind values from the generated layer.
const (
	AccountKindStorage          AccountKind = generated.AccountKindStorage
	AccountKindBlobStorage      AccountKind = generated.AccountKindBlobStorage
	AccountKindStorageV2        AccountKind = generated.AccountKindStorageV2
	AccountKindFileStorage      AccountKind = generated.AccountKindFileStorage
	AccountKindBlockBlobStorage AccountKind = generated.AccountKindBlockBlobStorage
)

// PossibleAccountKindValues returns the possible values for the AccountKind const type.
func PossibleAccountKindValues() []AccountKind {
	return generated.PossibleAccountKindValues()
}
// BlobType defines values for BlobType
type BlobType = generated.BlobType

// Re-exported BlobType values from the generated layer.
const (
	BlobTypeBlockBlob  BlobType = generated.BlobTypeBlockBlob
	BlobTypePageBlob   BlobType = generated.BlobTypePageBlob
	BlobTypeAppendBlob BlobType = generated.BlobTypeAppendBlob
)

// PossibleBlobTypeValues returns the possible values for the BlobType const type.
func PossibleBlobTypeValues() []BlobType {
	return generated.PossibleBlobTypeValues()
}

// LeaseStatusType defines values for LeaseStatusType
type LeaseStatusType = generated.LeaseStatusType

// Re-exported LeaseStatusType values from the generated layer.
const (
	LeaseStatusTypeLocked   LeaseStatusType = generated.LeaseStatusTypeLocked
	LeaseStatusTypeUnlocked LeaseStatusType = generated.LeaseStatusTypeUnlocked
)

// PossibleLeaseStatusTypeValues returns the possible values for the LeaseStatusType const type.
func PossibleLeaseStatusTypeValues() []LeaseStatusType {
	return generated.PossibleLeaseStatusTypeValues()
}

// LeaseDurationType defines values for LeaseDurationType
type LeaseDurationType = generated.LeaseDurationType

// Re-exported LeaseDurationType values from the generated layer.
const (
	LeaseDurationTypeInfinite LeaseDurationType = generated.LeaseDurationTypeInfinite
	LeaseDurationTypeFixed    LeaseDurationType = generated.LeaseDurationTypeFixed
)

// PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type.
func PossibleLeaseDurationTypeValues() []LeaseDurationType {
	return generated.PossibleLeaseDurationTypeValues()
}

// LeaseStateType defines values for LeaseStateType
type LeaseStateType = generated.LeaseStateType

// Re-exported LeaseStateType values from the generated layer.
const (
	LeaseStateTypeAvailable LeaseStateType = generated.LeaseStateTypeAvailable
	LeaseStateTypeLeased    LeaseStateType = generated.LeaseStateTypeLeased
	LeaseStateTypeExpired   LeaseStateType = generated.LeaseStateTypeExpired
	LeaseStateTypeBreaking  LeaseStateType = generated.LeaseStateTypeBreaking
	LeaseStateTypeBroken    LeaseStateType = generated.LeaseStateTypeBroken
)

// PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type.
func PossibleLeaseStateTypeValues() []LeaseStateType {
	return generated.PossibleLeaseStateTypeValues()
}
// ArchiveStatus defines values for ArchiveStatus
type ArchiveStatus = generated.ArchiveStatus

// Re-exported ArchiveStatus values from the generated layer.
const (
	ArchiveStatusRehydratePendingToCool ArchiveStatus = generated.ArchiveStatusRehydratePendingToCool
	ArchiveStatusRehydratePendingToHot  ArchiveStatus = generated.ArchiveStatusRehydratePendingToHot
)

// PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type.
func PossibleArchiveStatusValues() []ArchiveStatus {
	return generated.PossibleArchiveStatusValues()
}

// CopyStatusType defines values for CopyStatusType
type CopyStatusType = generated.CopyStatusType

// Re-exported CopyStatusType values from the generated layer.
const (
	CopyStatusTypePending CopyStatusType = generated.CopyStatusTypePending
	CopyStatusTypeSuccess CopyStatusType = generated.CopyStatusTypeSuccess
	CopyStatusTypeAborted CopyStatusType = generated.CopyStatusTypeAborted
	CopyStatusTypeFailed  CopyStatusType = generated.CopyStatusTypeFailed
)

// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type.
func PossibleCopyStatusTypeValues() []CopyStatusType {
	return generated.PossibleCopyStatusTypeValues()
}

// ImmutabilityPolicyMode defines values for ImmutabilityPolicyMode
type ImmutabilityPolicyMode = generated.ImmutabilityPolicyMode

// Re-exported ImmutabilityPolicyMode values from the generated layer.
const (
	ImmutabilityPolicyModeMutable  ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeMutable
	ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeUnlocked
	ImmutabilityPolicyModeLocked   ImmutabilityPolicyMode = generated.ImmutabilityPolicyModeLocked
)

// PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type.
func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode {
	return generated.PossibleImmutabilityPolicyModeValues()
}

// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate.
// Valid values are High and Standard.
type RehydratePriority = generated.RehydratePriority

// Re-exported RehydratePriority values from the generated layer.
const (
	RehydratePriorityHigh     RehydratePriority = generated.RehydratePriorityHigh
	RehydratePriorityStandard RehydratePriority = generated.RehydratePriorityStandard
)

// PossibleRehydratePriorityValues returns the possible values for the RehydratePriority const type.
func PossibleRehydratePriorityValues() []RehydratePriority {
	return generated.PossibleRehydratePriorityValues()
}

View file

@ -0,0 +1,263 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package container
import (
"reflect"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
// SharedKeyCredential contains an account's name and its primary or secondary key.
type SharedKeyCredential = exported.SharedKeyCredential

// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
// storage account's name and either its primary or secondary key.
func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) {
	return exported.NewSharedKeyCredential(accountName, accountKey)
}

// Shared Model Declarations -------------------------------------------------------------------------------------------
// (type aliases re-exporting generated/exported types so callers only import this package)

// CpkScopeInfo contains a group of parameters for the ContainerClient.Create method.
type CpkScopeInfo = generated.ContainerCpkScopeInfo

// BlobProperties - Properties of a blob
type BlobProperties = generated.BlobPropertiesInternal

// BlobItem - An Azure Storage blob
type BlobItem = generated.BlobItemInternal

// AccessConditions identifies container-specific access conditions which you optionally set.
type AccessConditions = exported.ContainerAccessConditions

// LeaseAccessConditions contains optional parameters to access leased entity.
type LeaseAccessConditions = exported.LeaseAccessConditions

// ModifiedAccessConditions contains a group of parameters for specifying access conditions.
type ModifiedAccessConditions = exported.ModifiedAccessConditions

// AccessPolicy - An Access policy
type AccessPolicy = generated.AccessPolicy

// AccessPolicyPermission type simplifies creating the permissions string for a container's access policy.
// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field.
type AccessPolicyPermission = exported.AccessPolicyPermission

// SignedIdentifier - signed identifier
type SignedIdentifier = generated.SignedIdentifier
// Request Model Declaration -------------------------------------------------------------------------------------------

// CreateOptions contains the optional parameters for the Client.Create method.
type CreateOptions struct {
	// Specifies whether data in the container may be accessed publicly and the level of access
	Access *PublicAccessType

	// Optional. Specifies a user-defined name-value pair associated with the blob.
	Metadata map[string]string

	// Optional. Specifies the encryption scope settings to set on the container.
	CpkScopeInfo *CpkScopeInfo
}

// ---------------------------------------------------------------------------------------------------------------------

// DeleteOptions contains the optional parameters for the Client.Delete method.
type DeleteOptions struct {
	// Optional access conditions (lease and modified-since) the request must satisfy.
	AccessConditions *AccessConditions
}

// format converts DeleteOptions into the generated-layer delete options plus the
// lease and modified access conditions. A nil receiver yields all-nil results.
func (o *DeleteOptions) format() (*generated.ContainerClientDeleteOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
	if o == nil {
		return nil, nil, nil
	}
	leaseAccessConditions, modifiedAccessConditions := exported.FormatContainerAccessConditions(o.AccessConditions)
	return nil, leaseAccessConditions, modifiedAccessConditions
}
// ---------------------------------------------------------------------------------------------------------------------

// RestoreOptions contains the optional parameters for the Client.Restore method.
type RestoreOptions struct {
	// placeholder for future options
}

// ---------------------------------------------------------------------------------------------------------------------

// GetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method.
type GetPropertiesOptions struct {
	// Optional lease access conditions applied to the request.
	LeaseAccessConditions *LeaseAccessConditions
}

// format converts GetPropertiesOptions into the generated-layer options and the
// lease access conditions. A nil receiver yields all-nil results.
func (o *GetPropertiesOptions) format() (*generated.ContainerClientGetPropertiesOptions, *generated.LeaseAccessConditions) {
	if o == nil {
		return nil, nil
	}
	return nil, o.LeaseAccessConditions
}
// ---------------------------------------------------------------------------------------------------------------------

// ListBlobsInclude indicates what additional information the service should return with each blob.
type ListBlobsInclude struct {
	Copy, Metadata, Snapshots, UncommittedBlobs, Deleted, Tags, Versions, LegalHold, ImmutabilityPolicy, DeletedWithVersions bool
}

// format converts the enabled flags into the generated-layer include-item list.
// A fully zero value yields nil so that no include parameter is sent.
func (l ListBlobsInclude) format() []generated.ListBlobsIncludeItem {
	if reflect.ValueOf(l).IsZero() {
		return nil
	}
	// Emit items in the same fixed order the flags are checked.
	flags := []struct {
		enabled bool
		item    generated.ListBlobsIncludeItem
	}{
		{l.Copy, generated.ListBlobsIncludeItemCopy},
		{l.Deleted, generated.ListBlobsIncludeItemDeleted},
		{l.DeletedWithVersions, generated.ListBlobsIncludeItemDeletedwithversions},
		{l.ImmutabilityPolicy, generated.ListBlobsIncludeItemImmutabilitypolicy},
		{l.LegalHold, generated.ListBlobsIncludeItemLegalhold},
		{l.Metadata, generated.ListBlobsIncludeItemMetadata},
		{l.Snapshots, generated.ListBlobsIncludeItemSnapshots},
		{l.Tags, generated.ListBlobsIncludeItemTags},
		{l.UncommittedBlobs, generated.ListBlobsIncludeItemUncommittedblobs},
		{l.Versions, generated.ListBlobsIncludeItemVersions},
	}
	include := []generated.ListBlobsIncludeItem{}
	for _, f := range flags {
		if f.enabled {
			include = append(include, f.item)
		}
	}
	return include
}
// ListBlobsFlatOptions contains the optional parameters for the ContainerClient.ListBlobFlatSegment method.
type ListBlobsFlatOptions struct {
	// Include this parameter to specify one or more datasets to include in the response.
	Include ListBlobsInclude

	// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
	// operation returns the NextMarker value within the response body if the listing
	// operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
	// as the value for the marker parameter in a subsequent call to request the next
	// page of list items. The marker value is opaque to the client.
	Marker *string

	// Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
	// greater than 5000, the server will return up to 5000 items. Note that if the
	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
	// of the results. For this reason, it is possible that the service will
	// return fewer results than specified by maxresults, or than the default of 5000.
	MaxResults *int32

	// Filters the results to return only containers whose name begins with the specified prefix.
	Prefix *string
}

// ---------------------------------------------------------------------------------------------------------------------

// ListBlobsHierarchyOptions provides set of configurations for Client.NewListBlobsHierarchyPager
type ListBlobsHierarchyOptions struct {
	// Include this parameter to specify one or more datasets to include in the response.
	Include ListBlobsInclude

	// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
	// operation returns the NextMarker value within the response body if the listing
	// operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
	// as the value for the marker parameter in a subsequent call to request the next
	// page of list items. The marker value is opaque to the client.
	Marker *string

	// Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
	// greater than 5000, the server will return up to 5000 items. Note that if the
	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
	// of the results. For this reason, it is possible that the service will
	// return fewer results than specified by maxresults, or than the default of 5000.
	MaxResults *int32

	// Filters the results to return only containers whose name begins with the specified prefix.
	Prefix *string
}

// format converts ListBlobsHierarchyOptions into the generated-layer options struct.
// A nil receiver yields the zero-value options.
func (o *ListBlobsHierarchyOptions) format() generated.ContainerClientListBlobHierarchySegmentOptions {
	if o == nil {
		return generated.ContainerClientListBlobHierarchySegmentOptions{}
	}
	return generated.ContainerClientListBlobHierarchySegmentOptions{
		Include:    o.Include.format(),
		Marker:     o.Marker,
		Maxresults: o.MaxResults,
		Prefix:     o.Prefix,
	}
}
// ---------------------------------------------------------------------------------------------------------------------

// SetMetadataOptions contains the optional parameters for the Client.SetMetadata method.
type SetMetadataOptions struct {
	// Metadata holds the user-defined name-value pairs to set on the container.
	Metadata map[string]string
	// Optional lease access conditions applied to the request.
	LeaseAccessConditions *LeaseAccessConditions
	// Optional modified/unmodified-since conditions applied to the request.
	ModifiedAccessConditions *ModifiedAccessConditions
}

// format converts SetMetadataOptions into the generated-layer options plus the
// lease and modified access conditions. A nil receiver yields all-nil results.
func (o *SetMetadataOptions) format() (*generated.ContainerClientSetMetadataOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
	if o == nil {
		return nil, nil, nil
	}
	return &generated.ContainerClientSetMetadataOptions{Metadata: o.Metadata}, o.LeaseAccessConditions, o.ModifiedAccessConditions
}

// ---------------------------------------------------------------------------------------------------------------------

// GetAccessPolicyOptions contains the optional parameters for the Client.GetAccessPolicy method.
type GetAccessPolicyOptions struct {
	// Optional lease access conditions applied to the request.
	LeaseAccessConditions *LeaseAccessConditions
}

// format converts GetAccessPolicyOptions into the generated-layer options and the
// lease access conditions. A nil receiver yields all-nil results.
func (o *GetAccessPolicyOptions) format() (*generated.ContainerClientGetAccessPolicyOptions, *LeaseAccessConditions) {
	if o == nil {
		return nil, nil
	}
	return nil, o.LeaseAccessConditions
}

// ---------------------------------------------------------------------------------------------------------------------

// SetAccessPolicyOptions provides set of configurations for ContainerClient.SetAccessPolicy operation
type SetAccessPolicyOptions struct {
	// Specifies whether data in the container may be accessed publicly and the level of access
	Access *PublicAccessType
	// Optional access conditions (lease and modified-since) the request must satisfy.
	// NOTE(review): the previous comment on this field described a client-request-ID
	// value and did not match the field; confirm intended documentation upstream.
	AccessConditions *AccessConditions
}

// format converts SetAccessPolicyOptions into the generated-layer options plus the
// lease and modified access conditions, returned in that order. A nil receiver
// yields all-nil results.
func (o *SetAccessPolicyOptions) format() (*generated.ContainerClientSetAccessPolicyOptions, *LeaseAccessConditions, *ModifiedAccessConditions) {
	if o == nil {
		return nil, nil, nil
	}
	lac, mac := exported.FormatContainerAccessConditions(o.AccessConditions)
	return &generated.ContainerClientSetAccessPolicyOptions{
		Access: o.Access,
	}, lac, mac
}

View file

@ -0,0 +1,38 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package container
import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
// The aliases below re-export the generated container client's response
// envelopes under caller-friendly names; they add no behavior of their own.

// CreateResponse contains the response from method Client.Create.
type CreateResponse = generated.ContainerClientCreateResponse

// DeleteResponse contains the response from method Client.Delete.
type DeleteResponse = generated.ContainerClientDeleteResponse

// RestoreResponse contains the response from method Client.Restore.
type RestoreResponse = generated.ContainerClientRestoreResponse

// GetPropertiesResponse contains the response from method Client.GetProperties.
type GetPropertiesResponse = generated.ContainerClientGetPropertiesResponse

// ListBlobsFlatResponse contains the response from method Client.ListBlobFlatSegment.
type ListBlobsFlatResponse = generated.ContainerClientListBlobFlatSegmentResponse

// ListBlobsHierarchyResponse contains the response from method Client.ListBlobHierarchySegment.
type ListBlobsHierarchyResponse = generated.ContainerClientListBlobHierarchySegmentResponse

// SetMetadataResponse contains the response from method Client.SetMetadata.
type SetMetadataResponse = generated.ContainerClientSetMetadataResponse

// GetAccessPolicyResponse contains the response from method Client.GetAccessPolicy.
type GetAccessPolicyResponse = generated.ContainerClientGetAccessPolicyResponse

// SetAccessPolicyResponse contains the response from method Client.SetAccessPolicy.
type SetAccessPolicyResponse = generated.ContainerClientSetAccessPolicyResponse

View file

@ -1,9 +1,8 @@
//go:build go1.18 //go:build go1.18
// +build go1.18 // +build go1.18
// Copyright 2017 Microsoft Corporation. All rights reserved. // Copyright (c) Microsoft Corporation. All rights reserved.
// Use of this source code is governed by an MIT // Licensed under the MIT License. See License.txt in the project root for license information.
// license that can be found in the LICENSE file.
/* /*
@ -167,13 +166,13 @@ Examples
handle(err) handle(err)
// Download the blob's contents and ensure that the download worked properly // Download the blob's contents and ensure that the download worked properly
blobDownloadResponse, err := blockBlobClient.Download(context.TODO(), nil) blobDownloadResponse, err := blockBlobClient.DownloadStream(context.TODO(), nil)
handle(err) handle(err)
// Use the bytes.Buffer object to read the downloaded data. // Use the bytes.Buffer object to read the downloaded data.
// RetryReaderOptions has a lot of in-depth tuning abilities, but for the sake of simplicity, we'll omit those here. // RetryReaderOptions has a lot of in-depth tuning abilities, but for the sake of simplicity, we'll omit those here.
reader := blobDownloadResponse.Body(nil) reader := blobDownloadResponse.Body(nil)
downloadData, err := ioutil.ReadAll(reader) downloadData, err := io.ReadAll(reader)
handle(err) handle(err)
if string(downloadData) != uploadData { if string(downloadData) != uploadData {
handle(errors.New("Uploaded data should be same as downloaded data")) handle(errors.New("Uploaded data should be same as downloaded data"))

View file

@ -1,316 +0,0 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"context"
"encoding/base64"
"io"
"net/http"
"sync"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal"
"bytes"
"errors"
"os"
)
// uploadReaderAtToBlockBlob uploads a buffer in blocks to a block blob.
// Small payloads (<= BlockBlobMaxUploadBlobBytes) go up in a single Upload
// call; larger ones are staged as parallel blocks and committed with
// CommitBlockList. Returns the raw HTTP response of the final service call.
func (bb *BlockBlobClient) uploadReaderAtToBlockBlob(ctx context.Context, reader io.ReaderAt, readerSize int64, o UploadOption) (*http.Response, error) {
	if o.BlockSize == 0 {
		// If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error
		if readerSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks {
			return nil, errors.New("buffer is too large to upload to a block blob")
		}
		// If bufferSize <= BlockBlobMaxUploadBlobBytes, then Upload should be used with just 1 I/O request
		if readerSize <= BlockBlobMaxUploadBlobBytes {
			o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified
		} else {
			o.BlockSize = readerSize / BlockBlobMaxBlocks // buffer / max blocks = block size to use all 50,000 blocks
			if o.BlockSize < BlobDefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB
				o.BlockSize = BlobDefaultDownloadBlockSize
			}
			// StageBlock will be called with blockSize blocks and a Parallelism of (BufferSize / BlockSize).
		}
	}

	if readerSize <= BlockBlobMaxUploadBlobBytes {
		// If the size can fit in 1 Upload call, do it this way
		var body io.ReadSeeker = io.NewSectionReader(reader, 0, readerSize)
		if o.Progress != nil {
			// Wrap the body so the caller's progress callback observes bytes sent.
			body = streaming.NewRequestProgress(internal.NopCloser(body), o.Progress)
		}

		uploadBlockBlobOptions := o.getUploadBlockBlobOptions()
		resp, err := bb.Upload(ctx, internal.NopCloser(body), uploadBlockBlobOptions)

		return resp.RawResponse, err
	}

	// Number of blocks = ceil(readerSize / BlockSize).
	var numBlocks = uint16(((readerSize - 1) / o.BlockSize) + 1)

	blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs
	progress := int64(0)
	progressLock := &sync.Mutex{}

	err := DoBatchTransfer(ctx, BatchTransferOptions{
		OperationName: "uploadReaderAtToBlockBlob",
		TransferSize:  readerSize,
		ChunkSize:     o.BlockSize,
		Parallelism:   o.Parallelism,
		Operation: func(offset int64, count int64, ctx context.Context) error {
			// This function is called once per block.
			// It is passed this block's offset within the buffer and its count of bytes
			// Prepare to read the proper block/section of the buffer
			var body io.ReadSeeker = io.NewSectionReader(reader, offset, count)
			blockNum := offset / o.BlockSize
			if o.Progress != nil {
				blockProgress := int64(0)
				body = streaming.NewRequestProgress(internal.NopCloser(body),
					func(bytesTransferred int64) {
						diff := bytesTransferred - blockProgress
						blockProgress = bytesTransferred
						progressLock.Lock() // 1 goroutine at a time gets progress report
						progress += diff
						o.Progress(progress)
						progressLock.Unlock()
					})
			}

			// Block IDs are unique values to avoid issue if 2+ clients are uploading blocks
			// at the same time causing PutBlockList to get a mix of blocks from all the clients.
			generatedUuid, err := uuid.New()
			if err != nil {
				return err
			}
			blockIDList[blockNum] = base64.StdEncoding.EncodeToString([]byte(generatedUuid.String()))
			stageBlockOptions := o.getStageBlockOptions()
			_, err = bb.StageBlock(ctx, blockIDList[blockNum], internal.NopCloser(body), stageBlockOptions)
			return err
		},
	})
	if err != nil {
		return nil, err
	}
	// All put blocks were successful, call Put Block List to finalize the blob
	commitBlockListOptions := o.getCommitBlockListOptions()
	resp, err := bb.CommitBlockList(ctx, blockIDList, commitBlockListOptions)

	return resp.RawResponse, err
}
// UploadBuffer uploads a buffer in blocks to a block blob.
func (bb *BlockBlobClient) UploadBuffer(ctx context.Context, b []byte, o UploadOption) (*http.Response, error) {
	reader := bytes.NewReader(b)
	return bb.uploadReaderAtToBlockBlob(ctx, reader, int64(len(b)), o)
}
// UploadFile uploads a file in blocks to a block blob.
// The file's current size (via Stat) determines the transfer length.
func (bb *BlockBlobClient) UploadFile(ctx context.Context, file *os.File, o UploadOption) (*http.Response, error) {
	info, err := file.Stat()
	if err != nil {
		return nil, err
	}
	return bb.uploadReaderAtToBlockBlob(ctx, file, info.Size(), o)
}
// ---------------------------------------------------------------------------------------------------------------------
// UploadStream copies the file held in io.Reader to the Blob at blockBlobClient.
// A Context deadline or cancellation will cause this to error.
func (bb *BlockBlobClient) UploadStream(ctx context.Context, body io.Reader, o UploadStreamOptions) (BlockBlobCommitBlockListResponse, error) {
	// Apply option defaults; an invalid configuration aborts before any I/O.
	if err := o.defaults(); err != nil {
		return BlockBlobCommitBlockListResponse{}, err
	}

	// If we used the default manager, we need to close it.
	// (transferMangerNotSet is set by defaults() when the caller did not
	// supply a TransferManager, so ownership is ours here.)
	if o.transferMangerNotSet {
		defer o.TransferManager.Close()
	}

	result, err := copyFromReader(ctx, body, bb, o)
	if err != nil {
		return BlockBlobCommitBlockListResponse{}, err
	}

	return result, nil
}
// ---------------------------------------------------------------------------------------------------------------------
// DownloadToWriterAt downloads an Azure blob to a WriterAt with parallel.
// Offset and count are optional, pass 0 for both to download the entire blob.
// Chunks of o.BlockSize bytes are fetched concurrently via DoBatchTransfer and
// written at their absolute positions in writer.
func (b *BlobClient) DownloadToWriterAt(ctx context.Context, offset int64, count int64, writer io.WriterAt, o DownloadOptions) error {
	if o.BlockSize == 0 {
		o.BlockSize = BlobDefaultDownloadBlockSize
	}

	if count == CountToEnd { // If size not specified, calculate it
		// If we don't have the length at all, get it
		// NOTE(review): this issues a Download only to read ContentLength from
		// the response; the body is not consumed here.
		downloadBlobOptions := o.getDownloadBlobOptions(0, CountToEnd, nil)
		dr, err := b.Download(ctx, downloadBlobOptions)
		if err != nil {
			return err
		}
		count = *dr.ContentLength - offset
	}

	if count <= 0 {
		// The file is empty, there is nothing to download.
		return nil
	}

	// Prepare and do parallel download.
	progress := int64(0)
	progressLock := &sync.Mutex{}

	err := DoBatchTransfer(ctx, BatchTransferOptions{
		OperationName: "downloadBlobToWriterAt",
		TransferSize:  count,
		ChunkSize:     o.BlockSize,
		Parallelism:   o.Parallelism,
		Operation: func(chunkStart int64, count int64, ctx context.Context) error {
			// Each chunk issues its own ranged Download and writes the body
			// into the caller's WriterAt at the chunk's absolute position.
			downloadBlobOptions := o.getDownloadBlobOptions(chunkStart+offset, count, nil)
			dr, err := b.Download(ctx, downloadBlobOptions)
			if err != nil {
				return err
			}
			body := dr.Body(&o.RetryReaderOptionsPerBlock)
			if o.Progress != nil {
				rangeProgress := int64(0)
				body = streaming.NewResponseProgress(
					body,
					func(bytesTransferred int64) {
						diff := bytesTransferred - rangeProgress
						rangeProgress = bytesTransferred
						progressLock.Lock() // serialize cross-chunk progress accounting
						progress += diff
						o.Progress(progress)
						progressLock.Unlock()
					})
			}
			_, err = io.Copy(newSectionWriter(writer, chunkStart, count), body)
			if err != nil {
				return err
			}
			err = body.Close()
			return err
		},
	})
	if err != nil {
		return err
	}
	return nil
}
// DownloadToBuffer downloads an Azure blob to a buffer with parallel.
// Offset and count are optional, pass 0 for both to download the entire blob.
func (b *BlobClient) DownloadToBuffer(ctx context.Context, offset int64, count int64, _bytes []byte, o DownloadOptions) error {
	writer := newBytesWriter(_bytes)
	return b.DownloadToWriterAt(ctx, offset, count, writer, o)
}
// DownloadToFile downloads an Azure blob to a local file.
// The file would be truncated if the size doesn't match.
// Offset and count are optional, pass 0 for both to download the entire blob.
func (b *BlobClient) DownloadToFile(ctx context.Context, offset int64, count int64, file *os.File, o DownloadOptions) error {
	// Determine how many bytes the destination file must hold.
	size := count
	if count == CountToEnd {
		// Try to get Azure blob's size
		props, err := b.GetProperties(ctx, o.getBlobPropertiesOptions())
		if err != nil {
			return err
		}
		size = *props.ContentLength - offset
	}

	// Resize the local file when its current length differs from the blob range.
	stat, err := file.Stat()
	if err != nil {
		return err
	}
	if stat.Size() != size {
		if err := file.Truncate(size); err != nil {
			return err
		}
	}

	// An empty range needs no transfer at all.
	if size <= 0 {
		return nil
	}
	return b.DownloadToWriterAt(ctx, offset, size, file, o)
}
// ---------------------------------------------------------------------------------------------------------------------
// DoBatchTransfer helps to execute operations in a batch manner.
// Can be used by users to customize batch works (for other scenarios that the SDK does not provide)
// The transfer is split into ceil(TransferSize/ChunkSize) chunks run by a pool
// of o.Parallelism goroutines. The first chunk error cancels the shared
// context and is returned once every outstanding chunk has reported back.
func DoBatchTransfer(ctx context.Context, o BatchTransferOptions) error {
	if o.ChunkSize == 0 {
		return errors.New("ChunkSize cannot be 0")
	}

	if o.Parallelism == 0 {
		o.Parallelism = 5 // default Parallelism
	}

	// Prepare and do parallel operations.
	numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1)
	operationChannel := make(chan func() error, o.Parallelism) // Create the channel that release 'Parallelism' goroutines concurrently
	operationResponseChannel := make(chan error, numChunks)    // Holds each response
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Create the goroutines that process each operation (in parallel).
	// Workers exit when operationChannel is closed below; the response channel
	// is buffered for every chunk, so workers never block on send.
	for g := uint16(0); g < o.Parallelism; g++ {
		//grIndex := g
		go func() {
			for f := range operationChannel {
				err := f()
				operationResponseChannel <- err
			}
		}()
	}

	// Add each chunk's operation to the channel.
	for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
		curChunkSize := o.ChunkSize

		if chunkNum == numChunks-1 { // Last chunk
			curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total
		}
		offset := int64(chunkNum) * o.ChunkSize

		operationChannel <- func() error {
			return o.Operation(offset, curChunkSize, ctx)
		}
	}
	close(operationChannel)

	// Wait for the operations to complete.
	var firstErr error = nil
	for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ {
		responseError := <-operationResponseChannel
		// record the first error (the original error which should cause the other chunks to fail with canceled context)
		if responseError != nil && firstErr == nil {
			cancel() // As soon as any operation fails, cancel all remaining operation calls
			firstErr = responseError
		}
	}
	return firstErr
}
// ---------------------------------------------------------------------------------------------------------------------

View file

@ -0,0 +1,89 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package base
import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
// Client is a generic wrapper pairing a generated client of type T with the
// optional shared-key credential used to authorize its requests.
type Client[T any] struct {
	inner     *T
	sharedKey *exported.SharedKeyCredential
}

// InnerClient returns the wrapped generated client.
func InnerClient[T any](client *Client[T]) *T {
	return client.inner
}

// SharedKey returns the client's shared-key credential; it is nil when the
// client was constructed without one (see NewClient).
func SharedKey[T any](client *Client[T]) *exported.SharedKeyCredential {
	return client.sharedKey
}

// NewClient wraps inner without attaching a shared-key credential.
func NewClient[T any](inner *T) *Client[T] {
	return &Client[T]{inner: inner}
}
// NewServiceClient builds a wrapped generated ServiceClient for the given URL,
// pipeline, and optional shared-key credential.
func NewServiceClient(containerURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.ServiceClient] {
	return &Client[generated.ServiceClient]{
		inner:     generated.NewServiceClient(containerURL, pipeline),
		sharedKey: sharedKey,
	}
}

// NewContainerClient builds a wrapped generated ContainerClient for the given
// URL, pipeline, and optional shared-key credential.
func NewContainerClient(containerURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.ContainerClient] {
	return &Client[generated.ContainerClient]{
		inner:     generated.NewContainerClient(containerURL, pipeline),
		sharedKey: sharedKey,
	}
}

// NewBlobClient builds a wrapped generated BlobClient for the given URL,
// pipeline, and optional shared-key credential.
func NewBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *Client[generated.BlobClient] {
	return &Client[generated.BlobClient]{
		inner:     generated.NewBlobClient(blobURL, pipeline),
		sharedKey: sharedKey,
	}
}
// CompositeClient pairs two generated clients — typically the base BlobClient
// plus a specialized blob-type client — that share one URL, pipeline, and
// optional shared-key credential.
type CompositeClient[T, U any] struct {
	innerT    *T
	innerU    *U
	sharedKey *exported.SharedKeyCredential
}

// InnerClients exposes both halves of a composite client; the T half is
// re-wrapped as a *Client[T]. Note the returned Client[T] carries no
// shared key — use SharedKeyComposite on the composite itself for that.
func InnerClients[T, U any](client *CompositeClient[T, U]) (*Client[T], *U) {
	return &Client[T]{inner: client.innerT}, client.innerU
}
// NewAppendBlobClient builds a composite of the generated BlobClient and
// AppendBlobClient over the same blob URL and pipeline.
func NewAppendBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.AppendBlobClient] {
	return &CompositeClient[generated.BlobClient, generated.AppendBlobClient]{
		innerT:    generated.NewBlobClient(blobURL, pipeline),
		innerU:    generated.NewAppendBlobClient(blobURL, pipeline),
		sharedKey: sharedKey,
	}
}

// NewBlockBlobClient builds a composite of the generated BlobClient and
// BlockBlobClient over the same blob URL and pipeline.
func NewBlockBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.BlockBlobClient] {
	return &CompositeClient[generated.BlobClient, generated.BlockBlobClient]{
		innerT:    generated.NewBlobClient(blobURL, pipeline),
		innerU:    generated.NewBlockBlobClient(blobURL, pipeline),
		sharedKey: sharedKey,
	}
}

// NewPageBlobClient builds a composite of the generated BlobClient and
// PageBlobClient over the same blob URL and pipeline.
func NewPageBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.PageBlobClient] {
	return &CompositeClient[generated.BlobClient, generated.PageBlobClient]{
		innerT:    generated.NewBlobClient(blobURL, pipeline),
		innerU:    generated.NewPageBlobClient(blobURL, pipeline),
		sharedKey: sharedKey,
	}
}

// SharedKeyComposite returns the composite client's shared-key credential;
// nil when none was supplied at construction.
func SharedKeyComposite[T, U any](client *CompositeClient[T, U]) *exported.SharedKeyCredential {
	return client.sharedKey
}

View file

@ -0,0 +1,43 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package exported
import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
// SnapshotTimeFormat is the time layout used to render blob snapshot timestamps.
const SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00"

// ContainerAccessConditions identifies container-specific access conditions which you optionally set.
type ContainerAccessConditions struct {
	ModifiedAccessConditions *ModifiedAccessConditions
	LeaseAccessConditions    *LeaseAccessConditions
}

// FormatContainerAccessConditions splits b into its lease and modified
// condition parameter groups; a nil input yields nil, nil.
func FormatContainerAccessConditions(b *ContainerAccessConditions) (*LeaseAccessConditions, *ModifiedAccessConditions) {
	if b == nil {
		return nil, nil
	}
	return b.LeaseAccessConditions, b.ModifiedAccessConditions
}

// BlobAccessConditions identifies blob-specific access conditions which you optionally set.
type BlobAccessConditions struct {
	LeaseAccessConditions    *LeaseAccessConditions
	ModifiedAccessConditions *ModifiedAccessConditions
}

// FormatBlobAccessConditions splits b into its lease and modified condition
// parameter groups; a nil input yields nil, nil.
func FormatBlobAccessConditions(b *BlobAccessConditions) (*LeaseAccessConditions, *ModifiedAccessConditions) {
	if b == nil {
		return nil, nil
	}
	return b.LeaseAccessConditions, b.ModifiedAccessConditions
}

// LeaseAccessConditions contains optional parameters to access leased entity.
type LeaseAccessConditions = generated.LeaseAccessConditions

// ModifiedAccessConditions contains a group of parameters for specifying access conditions.
type ModifiedAccessConditions = generated.ModifiedAccessConditions

View file

@ -2,9 +2,9 @@
// +build go1.18 // +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved. // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. // Licensed under the MIT License. See License.txt in the project root for license information.
package azblob package exported
import ( import (
"bytes" "bytes"
@ -19,7 +19,7 @@ type AccessPolicyPermission struct {
// String produces the access policy permission string for an Azure Storage container. // String produces the access policy permission string for an Azure Storage container.
// Call this method to set AccessPolicy's Permission field. // Call this method to set AccessPolicy's Permission field.
func (p AccessPolicyPermission) String() string { func (p *AccessPolicyPermission) String() string {
var b bytes.Buffer var b bytes.Buffer
if p.Read { if p.Read {
b.WriteRune('r') b.WriteRune('r')

View file

@ -0,0 +1,33 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package exported
import (
"fmt"
"strconv"
)
// HTTPRange defines a range of bytes within an HTTP resource, starting at offset and
// ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange
// which has an offset but no zero value count indicates from the offset to the resource's end.
type HTTPRange struct {
	Offset int64
	Count  int64
}

// FormatHTTPRange renders r as the value of an HTTP "Range" header
// ("bytes=<first>-<last>"). The zero range returns nil, meaning the whole
// resource; Count == 0 with a non-zero Offset leaves the end open.
func FormatHTTPRange(r HTTPRange) *string {
	if r.Offset == 0 && r.Count == 0 {
		// No restriction requested.
		return nil
	}
	lastByte := ""
	if r.Count > 0 {
		lastByte = strconv.FormatInt(r.Offset+r.Count-1, 10)
	}
	formatted := fmt.Sprintf("bytes=%v-%s", r.Offset, lastByte)
	return &formatted
}

View file

@ -2,9 +2,9 @@
// +build go1.18 // +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved. // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. // Licensed under the MIT License. See License.txt in the project root for license information.
package azblob package exported
import ( import (
"bytes" "bytes"
@ -22,6 +22,7 @@ import (
azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/Azure/azure-sdk-for-go/sdk/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
) )
// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the // NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
@ -35,7 +36,6 @@ func NewSharedKeyCredential(accountName string, accountKey string) (*SharedKeyCr
} }
// SharedKeyCredential contains an account's name and its primary or secondary key. // SharedKeyCredential contains an account's name and its primary or secondary key.
// It is immutable making it shareable and goroutine-safe.
type SharedKeyCredential struct { type SharedKeyCredential struct {
// Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only // Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only
accountName string accountName string
@ -58,7 +58,7 @@ func (c *SharedKeyCredential) SetAccountKey(accountKey string) error {
} }
// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. // ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS.
func (c *SharedKeyCredential) ComputeHMACSHA256(message string) (string, error) { func (c *SharedKeyCredential) computeHMACSHA256(message string) (string, error) {
h := hmac.New(sha256.New, c.accountKey.Load().([]byte)) h := hmac.New(sha256.New, c.accountKey.Load().([]byte))
_, err := h.Write([]byte(message)) _, err := h.Write([]byte(message))
return base64.StdEncoding.EncodeToString(h.Sum(nil)), err return base64.StdEncoding.EncodeToString(h.Sum(nil)), err
@ -67,7 +67,7 @@ func (c *SharedKeyCredential) ComputeHMACSHA256(message string) (string, error)
func (c *SharedKeyCredential) buildStringToSign(req *http.Request) (string, error) { func (c *SharedKeyCredential) buildStringToSign(req *http.Request) (string, error) {
// https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services
headers := req.Header headers := req.Header
contentLength := headers.Get(headerContentLength) contentLength := getHeader(shared.HeaderContentLength, headers)
if contentLength == "0" { if contentLength == "0" {
contentLength = "" contentLength = ""
} }
@ -79,23 +79,36 @@ func (c *SharedKeyCredential) buildStringToSign(req *http.Request) (string, erro
stringToSign := strings.Join([]string{ stringToSign := strings.Join([]string{
req.Method, req.Method,
headers.Get(headerContentEncoding), getHeader(shared.HeaderContentEncoding, headers),
headers.Get(headerContentLanguage), getHeader(shared.HeaderContentLanguage, headers),
contentLength, contentLength,
headers.Get(headerContentMD5), getHeader(shared.HeaderContentMD5, headers),
headers.Get(headerContentType), getHeader(shared.HeaderContentType, headers),
"", // Empty date because x-ms-date is expected (as per web page above) "", // Empty date because x-ms-date is expected (as per web page above)
headers.Get(headerIfModifiedSince), getHeader(shared.HeaderIfModifiedSince, headers),
headers.Get(headerIfMatch), getHeader(shared.HeaderIfMatch, headers),
headers.Get(headerIfNoneMatch), getHeader(shared.HeaderIfNoneMatch, headers),
headers.Get(headerIfUnmodifiedSince), getHeader(shared.HeaderIfUnmodifiedSince, headers),
headers.Get(headerRange), getHeader(shared.HeaderRange, headers),
c.buildCanonicalizedHeader(headers), c.buildCanonicalizedHeader(headers),
canonicalizedResource, canonicalizedResource,
}, "\n") }, "\n")
return stringToSign, nil return stringToSign, nil
} }
// getHeader returns the first value stored under key in headers, or the empty
// string when the map is nil, the key is absent, or the key maps to no values.
// The lookup is an exact (case-sensitive) map access; callers pass canonical
// header names from the shared package.
func getHeader(key string, headers map[string][]string) string {
	// Indexing a nil map safely yields the zero value, so the previous
	// explicit nil check and two-step comma-ok lookup were redundant.
	if values := headers[key]; len(values) > 0 {
		return values[0]
	}
	return ""
}
func (c *SharedKeyCredential) buildCanonicalizedHeader(headers http.Header) string { func (c *SharedKeyCredential) buildCanonicalizedHeader(headers http.Header) string {
cm := map[string][]string{} cm := map[string][]string{}
for k, v := range headers { for k, v := range headers {
@ -165,33 +178,41 @@ func (c *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, er
return cr.String(), nil return cr.String(), nil
} }
type sharedKeyCredPolicy struct { // ComputeHMACSHA256 is a helper for computing the signed string outside of this package.
func ComputeHMACSHA256(cred *SharedKeyCredential, message string) (string, error) {
return cred.computeHMACSHA256(message)
}
// the following content isn't actually exported but must live
// next to SharedKeyCredential as it uses its unexported methods
type SharedKeyCredPolicy struct {
cred *SharedKeyCredential cred *SharedKeyCredential
} }
func newSharedKeyCredPolicy(cred *SharedKeyCredential) *sharedKeyCredPolicy { func NewSharedKeyCredPolicy(cred *SharedKeyCredential) *SharedKeyCredPolicy {
return &sharedKeyCredPolicy{cred: cred} return &SharedKeyCredPolicy{cred: cred}
} }
func (s *sharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) { func (s *SharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) {
if d := req.Raw().Header.Get(headerXmsDate); d == "" { if d := getHeader(shared.HeaderXmsDate, req.Raw().Header); d == "" {
req.Raw().Header.Set(headerXmsDate, time.Now().UTC().Format(http.TimeFormat)) req.Raw().Header.Set(shared.HeaderXmsDate, time.Now().UTC().Format(http.TimeFormat))
} }
stringToSign, err := s.cred.buildStringToSign(req.Raw()) stringToSign, err := s.cred.buildStringToSign(req.Raw())
if err != nil { if err != nil {
return nil, err return nil, err
} }
signature, err := s.cred.ComputeHMACSHA256(stringToSign) signature, err := s.cred.computeHMACSHA256(stringToSign)
if err != nil { if err != nil {
return nil, err return nil, err
} }
authHeader := strings.Join([]string{"SharedKey ", s.cred.AccountName(), ":", signature}, "") authHeader := strings.Join([]string{"SharedKey ", s.cred.AccountName(), ":", signature}, "")
req.Raw().Header.Set(headerAuthorization, authHeader) req.Raw().Header.Set(shared.HeaderAuthorization, authHeader)
response, err := req.Next() response, err := req.Next()
if err != nil && response != nil && response.StatusCode == http.StatusForbidden { if err != nil && response != nil && response.StatusCode == http.StatusForbidden {
// Service failed to authenticate request, log it // Service failed to authenticate request, log it
log.Write(azlog.EventResponse, "===== HTTP Forbidden status, String-to-NewSASQueryParameters:\n"+stringToSign+"\n===============================\n") log.Write(azlog.EventResponse, "===== HTTP Forbidden status, String-to-Sign:\n"+stringToSign+"\n===============================\n")
} }
return response, err return response, err
} }

View file

@ -0,0 +1,64 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package exported
import (
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
// NewUserDelegationCredential creates a new UserDelegationCredential using a Storage account's Name and a user delegation Key from it
func NewUserDelegationCredential(accountName string, udk UserDelegationKey) *UserDelegationCredential {
	return &UserDelegationCredential{
		accountName:       accountName,
		userDelegationKey: udk,
	}
}

// UserDelegationKey is an alias for the generated user delegation key type.
type UserDelegationKey = generated.UserDelegationKey

// UserDelegationCredential contains an account's name and its user delegation key.
type UserDelegationCredential struct {
	accountName       string
	userDelegationKey UserDelegationKey
}

// getAccountName returns the Storage account's name.
func (f *UserDelegationCredential) getAccountName() string {
	return f.accountName
}

// GetAccountName is a helper for reading the credential's account name from
// outside this package.
func GetAccountName(udc *UserDelegationCredential) string {
	return udc.getAccountName()
}
// computeHMACSHA256 generates a hash signature for an HTTP request or for a SAS.
// The user delegation key value is base64-encoded; a corrupt key now surfaces
// as an error instead of silently signing with partially-decoded bytes
// (the decode error was previously discarded with `_`).
func (f *UserDelegationCredential) computeHMACSHA256(message string) (string, error) {
	// renamed from `bytes` to avoid shadowing the standard bytes package
	keyBytes, err := base64.StdEncoding.DecodeString(*f.userDelegationKey.Value)
	if err != nil {
		return "", err
	}
	h := hmac.New(sha256.New, keyBytes)
	_, err = h.Write([]byte(message))
	return base64.StdEncoding.EncodeToString(h.Sum(nil)), err
}
// ComputeUDCHMACSHA256 is a helper method for computing the signed string outside of this package.
func ComputeUDCHMACSHA256(udc *UserDelegationCredential, message string) (string, error) {
	return udc.computeHMACSHA256(message)
}

// getUDKParams returns a pointer to the credential's UserDelegationKey.
func (f *UserDelegationCredential) getUDKParams() *UserDelegationKey {
	return &f.userDelegationKey
}

// GetUDKParams is a helper method for accessing the user delegation key parameters outside of this package.
func GetUDKParams(udc *UserDelegationCredential) *UserDelegationKey {
	return udc.getUDKParams()
}

View file

@ -0,0 +1,12 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package exported
// Module identification constants for this azblob package version.
const (
	ModuleName    = "azblob"
	ModuleVersion = "v0.5.0"
)

View file

@ -0,0 +1,19 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
package generated
import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
// Endpoint returns the AppendBlobClient's URL endpoint.
func (client *AppendBlobClient) Endpoint() string {
	return client.endpoint
}

// Pipeline returns the HTTP pipeline the client uses to send requests.
func (client *AppendBlobClient) Pipeline() runtime.Pipeline {
	return client.pl
}

View file

@ -0,0 +1,304 @@
# Code Generation - Azure Blob SDK for Golang
### Settings
```yaml
go: true
clear-output-folder: false
version: "^3.0.0"
license-header: MICROSOFT_MIT_NO_VERSION
input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/e515b6251fdc21015282d2e84b85beec7c091763/specification/storage/data-plane/Microsoft.BlobStorage/preview/2020-10-02/blob.json"
credential-scope: "https://storage.azure.com/.default"
output-folder: .
file-prefix: "zz_"
openapi-type: "data-plane"
verbose: true
security: AzureKey
modelerfour:
group-parameters: false
seal-single-value-enum-by-default: true
lenient-model-deduplication: true
export-clients: true
use: "@autorest/go@4.0.0-preview.43"
```
### Remove pager methods and export various generated methods in container client
``` yaml
directive:
- from: zz_container_client.go
where: $
transform: >-
return $.
replace(/func \(client \*ContainerClient\) NewListBlobFlatSegmentPager\(.+\/\/ listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request/s, `// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request`).
replace(/\(client \*ContainerClient\) listBlobFlatSegmentCreateRequest\(/, `(client *ContainerClient) ListBlobFlatSegmentCreateRequest(`).
replace(/\(client \*ContainerClient\) listBlobFlatSegmentHandleResponse\(/, `(client *ContainerClient) ListBlobFlatSegmentHandleResponse(`);
```
### Remove pager methods and export various generated methods in service client
``` yaml
directive:
- from: zz_service_client.go
where: $
transform: >-
return $.
replace(/func \(client \*ServiceClient\) NewListContainersSegmentPager\(.+\/\/ listContainersSegmentCreateRequest creates the ListContainersSegment request/s, `// listContainersSegmentCreateRequest creates the ListContainersSegment request`).
replace(/\(client \*ServiceClient\) listContainersSegmentCreateRequest\(/, `(client *ServiceClient) ListContainersSegmentCreateRequest(`).
replace(/\(client \*ServiceClient\) listContainersSegmentHandleResponse\(/, `(client *ServiceClient) ListContainersSegmentHandleResponse(`);
```
### Fix BlobMetadata.
``` yaml
directive:
- from: swagger-document
where: $.definitions
transform: >
delete $.BlobMetadata["properties"];
```
### Don't include container name or blob in path - we have direct URIs.
``` yaml
directive:
- from: swagger-document
where: $["x-ms-paths"]
transform: >
for (const property in $)
{
if (property.includes('/{containerName}/{blob}'))
{
$[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName") && false == param['$ref'].endsWith("#/parameters/Blob"))});
}
else if (property.includes('/{containerName}'))
{
$[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName"))});
}
}
```
### Remove DataLake stuff.
``` yaml
directive:
- from: swagger-document
where: $["x-ms-paths"]
transform: >
for (const property in $)
{
if (property.includes('filesystem'))
{
delete $[property];
}
}
```
### Remove DataLakeStorageError
``` yaml
directive:
- from: swagger-document
where: $.definitions
transform: >
delete $.DataLakeStorageError;
```
### Fix 304s
``` yaml
directive:
- from: swagger-document
where: $["x-ms-paths"]["/{containerName}/{blob}"]
transform: >
$.get.responses["304"] = {
"description": "The condition specified using HTTP conditional header(s) is not met.",
"x-az-response-name": "ConditionNotMetError",
"headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } }
};
```
### Fix GeoReplication
``` yaml
directive:
- from: swagger-document
where: $.definitions
transform: >
delete $.GeoReplication.properties.Status["x-ms-enum"];
$.GeoReplication.properties.Status["x-ms-enum"] = {
"name": "BlobGeoReplicationStatus",
"modelAsString": false
};
```
### Fix RehydratePriority
``` yaml
directive:
- from: swagger-document
where: $.definitions
transform: >
delete $.RehydratePriority["x-ms-enum"];
$.RehydratePriority["x-ms-enum"] = {
"name": "RehydratePriority",
"modelAsString": false
};
```
### Fix BlobDeleteType
``` yaml
directive:
- from: swagger-document
where: $.parameters
transform: >
delete $.BlobDeleteType.enum;
$.BlobDeleteType.enum = [
"None",
"Permanent"
];
```
### Fix EncryptionAlgorithm
``` yaml
directive:
- from: swagger-document
where: $.parameters
transform: >
delete $.EncryptionAlgorithm.enum;
$.EncryptionAlgorithm.enum = [
"None",
"AES256"
];
```
### Fix XML string "ObjectReplicationMetadata" to "OrMetadata"
``` yaml
directive:
- from: swagger-document
where: $.definitions
transform: >
$.BlobItemInternal.properties["OrMetadata"] = $.BlobItemInternal.properties["ObjectReplicationMetadata"];
delete $.BlobItemInternal.properties["ObjectReplicationMetadata"];
```
### Export various createRequest/HandleResponse methods
``` yaml
directive:
- from: zz_container_client.go
where: $
transform: >-
return $.
replace(/listBlobHierarchySegmentCreateRequest/g, function(_, s) { return `ListBlobHierarchySegmentCreateRequest` }).
replace(/listBlobHierarchySegmentHandleResponse/g, function(_, s) { return `ListBlobHierarchySegmentHandleResponse` });
- from: zz_pageblob_client.go
where: $
transform: >-
return $.
replace(/getPageRanges(Diff)?CreateRequest/g, function(_, s) { if (s === undefined) { s = '' }; return `GetPageRanges${s}CreateRequest` }).
replace(/getPageRanges(Diff)?HandleResponse/g, function(_, s) { if (s === undefined) { s = '' }; return `GetPageRanges${s}HandleResponse` });
```
### Clean up some const type names so they don't stutter
``` yaml
directive:
- from: swagger-document
where: $.parameters['BlobDeleteType']
transform: >
$["x-ms-enum"].name = "DeleteType";
$["x-ms-client-name"] = "DeleteType";
- from: swagger-document
where: $.parameters['BlobExpiryOptions']
transform: >
$["x-ms-enum"].name = "ExpiryOptions";
$["x-ms-client-name"] = "ExpiryOptions";
- from: swagger-document
where: $["x-ms-paths"][*].*.responses[*].headers["x-ms-immutability-policy-mode"]
transform: >
$["x-ms-client-name"] = "ImmutabilityPolicyMode";
$.enum = [ "Mutable", "Unlocked", "Locked"];
$["x-ms-enum"] = { "name": "ImmutabilityPolicyMode", "modelAsString": false };
- from: swagger-document
where: $.parameters['ImmutabilityPolicyMode']
transform: >
$["x-ms-enum"].name = "ImmutabilityPolicySetting";
$["x-ms-client-name"] = "ImmutabilityPolicySetting";
- from: swagger-document
where: $.definitions['BlobPropertiesInternal']
transform: >
$.properties.ImmutabilityPolicyMode["x-ms-enum"].name = "ImmutabilityPolicyMode";
```
### use azcore.ETag
``` yaml
directive:
- from: zz_models.go
where: $
transform: >-
return $.
replace(/import "time"/, `import (\n\t"time"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore"\n)`).
replace(/Etag\s+\*string/g, `ETag *azcore.ETag`).
replace(/IfMatch\s+\*string/g, `IfMatch *azcore.ETag`).
replace(/IfNoneMatch\s+\*string/g, `IfNoneMatch *azcore.ETag`).
replace(/SourceIfMatch\s+\*string/g, `SourceIfMatch *azcore.ETag`).
replace(/SourceIfNoneMatch\s+\*string/g, `SourceIfNoneMatch *azcore.ETag`);
- from: zz_response_types.go
where: $
transform: >-
return $.
replace(/"time"/, `"time"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore"`).
replace(/ETag\s+\*string/g, `ETag *azcore.ETag`);
- from:
- zz_appendblob_client.go
- zz_blob_client.go
- zz_blockblob_client.go
- zz_container_client.go
- zz_pageblob_client.go
where: $
transform: >-
return $.
replace(/"github\.com\/Azure\/azure\-sdk\-for\-go\/sdk\/azcore\/policy"/, `"github.com/Azure/azure-sdk-for-go/sdk/azcore"\n\t"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"`).
replace(/result\.ETag\s+=\s+&val/g, `result.ETag = (*azcore.ETag)(&val)`).
replace(/\*modifiedAccessConditions.IfMatch/g, `string(*modifiedAccessConditions.IfMatch)`).
replace(/\*modifiedAccessConditions.IfNoneMatch/g, `string(*modifiedAccessConditions.IfNoneMatch)`).
replace(/\*sourceModifiedAccessConditions.SourceIfMatch/g, `string(*sourceModifiedAccessConditions.SourceIfMatch)`).
replace(/\*sourceModifiedAccessConditions.SourceIfNoneMatch/g, `string(*sourceModifiedAccessConditions.SourceIfNoneMatch)`);
```
### Unsure why this casing changed, but fixing it
``` yaml
directive:
- from: zz_models.go
where: $
transform: >-
return $.
replace(/SignedOid\s+\*string/g, `SignedOID *string`).
replace(/SignedTid\s+\*string/g, `SignedTID *string`);
```
### Fixing Typo with StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed
``` yaml
directive:
- from: zz_constants.go
where: $
transform: >-
return $.
replace(/StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed/g, `StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed`).
replace(/"IncrementalCopyOfEralierVersionSnapshotNotAllowed"/g, `"IncrementalCopyOfEarlierVersionSnapshotNotAllowed"`);
```

View file

@ -0,0 +1,17 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package generated
import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
// Endpoint returns the URL that this client targets.
func (client *BlobClient) Endpoint() string {
	return client.endpoint
}

// Pipeline returns the pipeline used for sending requests and handling responses.
func (client *BlobClient) Pipeline() runtime.Pipeline {
	return client.pl
}

View file

@ -0,0 +1,19 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
package generated
import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
// Endpoint returns the URL that this client targets.
func (client *BlockBlobClient) Endpoint() string {
	return client.endpoint
}

// Pipeline returns the pipeline used for sending requests and handling responses.
func (client *BlockBlobClient) Pipeline() runtime.Pipeline {
	return client.pl
}

View file

@ -0,0 +1,17 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package generated
import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
// Endpoint returns the URL that this client targets.
func (client *ContainerClient) Endpoint() string {
	return client.endpoint
}

// Pipeline returns the pipeline used for sending requests and handling responses.
func (client *ContainerClient) Pipeline() runtime.Pipeline {
	return client.pl
}

View file

@ -0,0 +1,17 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package generated
import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
// Endpoint returns the URL that this client targets.
func (client *PageBlobClient) Endpoint() string {
	return client.endpoint
}

// Pipeline returns the pipeline used for sending requests and handling responses.
func (client *PageBlobClient) Pipeline() runtime.Pipeline {
	return client.pl
}

View file

@ -0,0 +1,17 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package generated
import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
// Endpoint returns the URL that this client targets.
func (client *ServiceClient) Endpoint() string {
	return client.endpoint
}

// Pipeline returns the pipeline used for sending requests and handling responses.
func (client *ServiceClient) Pipeline() runtime.Pipeline {
	return client.pl
}

View file

@ -0,0 +1,653 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DO NOT EDIT.
package generated
import (
"context"
"encoding/base64"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"io"
"net/http"
"strconv"
"time"
)
// AppendBlobClient contains the methods for the AppendBlob group.
// Don't use this type directly, use NewAppendBlobClient() instead.
type AppendBlobClient struct {
	endpoint string           // URL of the service account, container, or blob targeted by requests
	pl       runtime.Pipeline // pipeline used for sending requests and handling responses
}
// NewAppendBlobClient returns a new AppendBlobClient configured with the
// given values.
//   - endpoint: the URL of the service account, container, or blob that is the
//     target of the desired operation.
//   - pl: the pipeline used for sending requests and handling responses.
func NewAppendBlobClient(endpoint string, pl runtime.Pipeline) *AppendBlobClient {
	return &AppendBlobClient{endpoint: endpoint, pl: pl}
}
// AppendBlock - The Append Block operation commits a new block of data to the end of an existing append blob. The Append
// Block operation is permitted only if the blob was created with x-ms-blob-type set to
// AppendBlob. Append Block is supported only on version 2015-02-21 version or later.
// If the operation fails it returns an *azcore.ResponseError type.
// Generated from API version 2020-10-02
// contentLength - The length of the request.
// body - Initial data
// options - AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method.
// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock
// method.
// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method.
// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *AppendBlobClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientAppendBlockResponse, error) {
	// Marshal all parameters into an HTTP request.
	request, err := client.appendBlockCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
	if err != nil {
		return AppendBlobClientAppendBlockResponse{}, err
	}
	// Send the request through the client's pipeline.
	response, err := client.pl.Do(request)
	if err != nil {
		return AppendBlobClientAppendBlockResponse{}, err
	}
	// The service signals success for this operation with 201 Created only.
	if !runtime.HasStatusCode(response, http.StatusCreated) {
		return AppendBlobClientAppendBlockResponse{}, runtime.NewResponseError(response)
	}
	return client.appendBlockHandleResponse(response)
}
// appendBlockCreateRequest creates the AppendBlock request.
// It issues a PUT against client.endpoint with comp=appendblock, copies the
// caller-supplied optional parameters into the corresponding standard and
// x-ms-* headers, and attaches body as application/octet-stream.
func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	// Query string: comp=appendblock selects the operation; timeout is added
	// only when the caller supplied one.
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "appendblock")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
	// Transactional content hashes are transmitted base64-encoded.
	if options != nil && options.TransactionalContentMD5 != nil {
		req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
	}
	if options != nil && options.TransactionalContentCRC64 != nil {
		req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	// Append-position preconditions (max blob size / expected append offset).
	if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil {
		req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)}
	}
	if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil {
		req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)}
	}
	// Customer-provided key (CPK) and encryption scope headers.
	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
	}
	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
	}
	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
	}
	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
		req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
	}
	// Conditional-access headers; timestamps use the RFC 1123 HTTP date format.
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	// Pinned service API version for this generated client.
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, req.SetBody(body, "application/octet-stream")
}
// appendBlockHandleResponse handles the AppendBlock response.
// It copies each response header, when present, into the corresponding field
// of AppendBlobClientAppendBlockResponse, decoding base64 hashes, RFC 1123
// timestamps, integers, and booleans along the way. Any decode/parse failure
// aborts with that error.
func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockResponse, error) {
	result := AppendBlobClientAppendBlockResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return AppendBlobClientAppendBlockResponse{}, err
		}
		result.LastModified = &lastModified
	}
	// Content hashes come back base64-encoded.
	if val := resp.Header.Get("Content-MD5"); val != "" {
		contentMD5, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return AppendBlobClientAppendBlockResponse{}, err
		}
		result.ContentMD5 = contentMD5
	}
	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
		xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return AppendBlobClientAppendBlockResponse{}, err
		}
		result.XMSContentCRC64 = xMSContentCRC64
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return AppendBlobClientAppendBlockResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" {
		result.BlobAppendOffset = &val
	}
	if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" {
		blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32)
		// The int32 conversion happens before the error check; on a parse
		// failure the converted value is discarded by the early return below.
		blobCommittedBlockCount := int32(blobCommittedBlockCount32)
		if err != nil {
			return AppendBlobClientAppendBlockResponse{}, err
		}
		result.BlobCommittedBlockCount = &blobCommittedBlockCount
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return AppendBlobClientAppendBlockResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
		result.EncryptionKeySHA256 = &val
	}
	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
		result.EncryptionScope = &val
	}
	return result, nil
}
// AppendBlockFromURL - The Append Block operation commits a new block of data to the end of an existing append blob where
// the contents are read from a source url. The Append Block operation is permitted only if the blob was
// created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later.
// If the operation fails it returns an *azcore.ResponseError type.
// Generated from API version 2020-10-02
// sourceURL - Specify a URL to the copy source.
// contentLength - The length of the request.
// options - AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL
// method.
// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method.
// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock
// method.
// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL
// method.
func (client *AppendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (AppendBlobClientAppendBlockFromURLResponse, error) {
	// Marshal all parameters into an HTTP request.
	request, err := client.appendBlockFromURLCreateRequest(ctx, sourceURL, contentLength, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions)
	if err != nil {
		return AppendBlobClientAppendBlockFromURLResponse{}, err
	}
	// Send the request through the client's pipeline.
	response, err := client.pl.Do(request)
	if err != nil {
		return AppendBlobClientAppendBlockFromURLResponse{}, err
	}
	// The service signals success for this operation with 201 Created only.
	if !runtime.HasStatusCode(response, http.StatusCreated) {
		return AppendBlobClientAppendBlockFromURLResponse{}, runtime.NewResponseError(response)
	}
	return client.appendBlockFromURLHandleResponse(response)
}
// appendBlockFromURLCreateRequest creates the AppendBlockFromURL request.
// It issues a PUT against client.endpoint with comp=appendblock, setting
// x-ms-copy-source to sourceURL and copying the optional source/destination
// conditions, CPK parameters, and content hashes into their respective
// headers. No body is attached; the service reads the data from sourceURL.
func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	// Query string: comp=appendblock selects the operation; timeout is added
	// only when the caller supplied one.
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "appendblock")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-copy-source"] = []string{sourceURL}
	if options != nil && options.SourceRange != nil {
		req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange}
	}
	// Source and transactional content hashes are transmitted base64-encoded.
	if options != nil && options.SourceContentMD5 != nil {
		req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)}
	}
	if options != nil && options.SourceContentcrc64 != nil {
		req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)}
	}
	req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
	if options != nil && options.TransactionalContentMD5 != nil {
		req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
	}
	// Customer-provided key (CPK) and encryption scope headers.
	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
	}
	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
	}
	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
	}
	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
		req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	// Append-position preconditions (max blob size / expected append offset).
	if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil {
		req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)}
	}
	if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil {
		req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)}
	}
	// Conditional-access headers for the destination blob; timestamps use the
	// RFC 1123 HTTP date format.
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	// Conditional-access headers for the copy source.
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
		req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
		req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
		req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
		req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)}
	}
	// Pinned service API version for this generated client.
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	if options != nil && options.CopySourceAuthorization != nil {
		req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}
// appendBlockFromURLHandleResponse handles the AppendBlockFromURL response.
// It copies each response header, when present, into the corresponding field
// of AppendBlobClientAppendBlockFromURLResponse, decoding base64 hashes,
// RFC 1123 timestamps, integers, and booleans along the way. Any decode/parse
// failure aborts with that error.
func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockFromURLResponse, error) {
	result := AppendBlobClientAppendBlockFromURLResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return AppendBlobClientAppendBlockFromURLResponse{}, err
		}
		result.LastModified = &lastModified
	}
	// Content hashes come back base64-encoded.
	if val := resp.Header.Get("Content-MD5"); val != "" {
		contentMD5, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return AppendBlobClientAppendBlockFromURLResponse{}, err
		}
		result.ContentMD5 = contentMD5
	}
	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
		xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return AppendBlobClientAppendBlockFromURLResponse{}, err
		}
		result.XMSContentCRC64 = xMSContentCRC64
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return AppendBlobClientAppendBlockFromURLResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" {
		result.BlobAppendOffset = &val
	}
	if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" {
		blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32)
		// The int32 conversion happens before the error check; on a parse
		// failure the converted value is discarded by the early return below.
		blobCommittedBlockCount := int32(blobCommittedBlockCount32)
		if err != nil {
			return AppendBlobClientAppendBlockFromURLResponse{}, err
		}
		result.BlobCommittedBlockCount = &blobCommittedBlockCount
	}
	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
		result.EncryptionKeySHA256 = &val
	}
	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
		result.EncryptionScope = &val
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return AppendBlobClientAppendBlockFromURLResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	return result, nil
}
// Create - The Create Append Blob operation creates a new append blob.
// If the operation fails it returns an *azcore.ResponseError type.
// Generated from API version 2020-10-02
// contentLength - The length of the request.
// options - AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method.
// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method.
// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method.
// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *AppendBlobClient) Create(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientCreateResponse, error) {
	// Marshal all parameters into an HTTP request.
	request, err := client.createCreateRequest(ctx, contentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
	if err != nil {
		return AppendBlobClientCreateResponse{}, err
	}
	// Send the request through the client's pipeline.
	response, err := client.pl.Do(request)
	if err != nil {
		return AppendBlobClientCreateResponse{}, err
	}
	// The service signals success for this operation with 201 Created only.
	if !runtime.HasStatusCode(response, http.StatusCreated) {
		return AppendBlobClientCreateResponse{}, runtime.NewResponseError(response)
	}
	return client.createHandleResponse(response)
}
// createCreateRequest creates the Create request.
// It issues a PUT against the client endpoint with the append-blob marker
// header, encodes the optional timeout as a query parameter, and maps every
// supplied option group (blob HTTP headers, metadata, lease, customer-provided
// key, encryption scope, and conditional-access headers) onto the matching
// x-ms-* / standard HTTP request headers. The returned request is ready to be
// sent through the pipeline.
func (client *AppendBlobClient) createCreateRequest(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	// The optional timeout is the only query parameter for Create.
	reqQP := req.Raw().URL.Query()
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	// Marks this Put Blob call as creating an append blob.
	req.Raw().Header["x-ms-blob-type"] = []string{"AppendBlob"}
	req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
	// Standard blob content properties, set only when provided.
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
		req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
		req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil {
		req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
		req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
		req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl}
	}
	// User-defined metadata is sent one header per key.
	if options != nil && options.Metadata != nil {
		for k, v := range options.Metadata {
			req.Raw().Header["x-ms-meta-"+k] = []string{v}
		}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
		req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
	}
	// Customer-provided encryption key (CPK) and encryption-scope headers.
	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
	}
	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
	}
	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
	}
	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
		req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
	}
	// Conditional-access (If-*) headers; times use the RFC 1123 wire format.
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	// Service version this client was generated against.
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	if options != nil && options.BlobTagsString != nil {
		req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
	}
	// Immutability-policy and legal-hold options.
	if options != nil && options.ImmutabilityPolicyExpiry != nil {
		req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)}
	}
	if options != nil && options.ImmutabilityPolicyMode != nil {
		req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
	}
	if options != nil && options.LegalHold != nil {
		req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}
// createHandleResponse handles the Create response.
// It copies the relevant response headers into an
// AppendBlobClientCreateResponse, parsing typed values (RFC 1123 dates,
// base64 MD5, booleans) and returning the first parse error encountered.
// Headers absent from the response leave the corresponding field nil.
func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (AppendBlobClientCreateResponse, error) {
	result := AppendBlobClientCreateResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		// Wire format for HTTP dates is RFC 1123.
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return AppendBlobClientCreateResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("Content-MD5"); val != "" {
		// MD5 is transmitted base64-encoded.
		contentMD5, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return AppendBlobClientCreateResponse{}, err
		}
		result.ContentMD5 = contentMD5
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("x-ms-version-id"); val != "" {
		result.VersionID = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return AppendBlobClientCreateResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return AppendBlobClientCreateResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
		result.EncryptionKeySHA256 = &val
	}
	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
		result.EncryptionScope = &val
	}
	return result, nil
}
// Seal - The Seal operation seals the Append Blob to make it read-only. Seal is supported only on version 2019-12-12 version
// or later.
// If the operation fails it returns an *azcore.ResponseError type.
// Generated from API version 2020-10-02
// options - AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method.
// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock
// method.
func (client *AppendBlobClient) Seal(ctx context.Context, options *AppendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (AppendBlobClientSealResponse, error) {
	// Build the request, run it through the pipeline, and only accept 200 OK.
	request, err := client.sealCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions, appendPositionAccessConditions)
	if err != nil {
		return AppendBlobClientSealResponse{}, err
	}
	httpResp, err := client.pl.Do(request)
	if err != nil {
		return AppendBlobClientSealResponse{}, err
	}
	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
		return AppendBlobClientSealResponse{}, runtime.NewResponseError(httpResp)
	}
	return client.sealHandleResponse(httpResp)
}
// sealCreateRequest creates the Seal request.
// It issues a PUT with comp=seal, encoding the optional timeout as a query
// parameter and mapping lease, conditional-access, and append-position
// options onto the corresponding request headers.
func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options *AppendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	// comp=seal selects the Seal operation on the blob endpoint.
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "seal")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	// Service version this client was generated against.
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	// Conditional-access (If-*) headers; times use the RFC 1123 wire format.
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	// The seal only succeeds when the blob's append offset matches this value.
	if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil {
		req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}
// sealHandleResponse handles the Seal response.
// It copies the relevant response headers into an AppendBlobClientSealResponse,
// parsing RFC 1123 dates and the x-ms-blob-sealed boolean; absent headers
// leave the corresponding field nil.
func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendBlobClientSealResponse, error) {
	result := AppendBlobClientSealResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return AppendBlobClientSealResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return AppendBlobClientSealResponse{}, err
		}
		result.Date = &date
	}
	// Indicates whether the append blob is now sealed (read-only).
	if val := resp.Header.Get("x-ms-blob-sealed"); val != "" {
		isSealed, err := strconv.ParseBool(val)
		if err != nil {
			return AppendBlobClientSealResponse{}, err
		}
		result.IsSealed = &isSealed
	}
	return result, nil
}

View file

@ -0,0 +1,960 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DO NOT EDIT.
package generated
import (
"context"
"encoding/base64"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"io"
"net/http"
"strconv"
"time"
)
// BlockBlobClient contains the methods for the BlockBlob group.
// Don't use this type directly, use NewBlockBlobClient() instead.
type BlockBlobClient struct {
	// endpoint is the URL of the service account, container, or blob targeted by requests.
	endpoint string
	// pl is the pipeline used for sending requests and handling responses.
	pl runtime.Pipeline
}
// NewBlockBlobClient creates a new instance of BlockBlobClient with the specified values.
// endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
// pl - the pipeline used for sending requests and handling responses.
func NewBlockBlobClient(endpoint string, pl runtime.Pipeline) *BlockBlobClient {
	return &BlockBlobClient{
		endpoint: endpoint,
		pl:       pl,
	}
}
// CommitBlockList - The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob.
// In order to be written as part of a blob, a block must have been successfully written to the
// server in a prior Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that
// have changed, then committing the new and existing blocks together. You can do
// this by specifying whether to commit a block from the committed block list or from the uncommitted block list, or to commit
// the most recently uploaded version of the block, whichever list it may
// belong to.
// If the operation fails it returns an *azcore.ResponseError type.
// Generated from API version 2020-10-02
// blocks - Blob Blocks.
// options - BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList
// method.
// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method.
// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method.
// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *BlockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientCommitBlockListResponse, error) {
	// Build the request, send it through the pipeline, and only accept 201 Created.
	request, err := client.commitBlockListCreateRequest(ctx, blocks, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
	if err != nil {
		return BlockBlobClientCommitBlockListResponse{}, err
	}
	httpResp, err := client.pl.Do(request)
	if err != nil {
		return BlockBlobClientCommitBlockListResponse{}, err
	}
	if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
		return BlockBlobClientCommitBlockListResponse{}, runtime.NewResponseError(httpResp)
	}
	return client.commitBlockListHandleResponse(httpResp)
}
// commitBlockListCreateRequest creates the CommitBlockList request.
// It issues a PUT with comp=blocklist, maps every supplied option group onto
// the corresponding request headers, and serializes the block lookup list as
// the XML request body.
func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	// comp=blocklist selects the Put Block List operation.
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "blocklist")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	// Standard blob content properties, set only when provided.
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
		req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
		req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
		req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil {
		req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
		req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)}
	}
	// Transactional checksums cover the request body (the block list XML).
	if options != nil && options.TransactionalContentMD5 != nil {
		req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
	}
	if options != nil && options.TransactionalContentCRC64 != nil {
		req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)}
	}
	// User-defined metadata is sent one header per key.
	if options != nil && options.Metadata != nil {
		for k, v := range options.Metadata {
			req.Raw().Header["x-ms-meta-"+k] = []string{v}
		}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
		req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
	}
	// Customer-provided encryption key (CPK) and encryption-scope headers.
	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
	}
	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
	}
	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
	}
	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
		req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
	}
	if options != nil && options.Tier != nil {
		req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
	}
	// Conditional-access (If-*) headers; times use the RFC 1123 wire format.
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	// Service version this client was generated against.
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	if options != nil && options.BlobTagsString != nil {
		req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
	}
	// Immutability-policy and legal-hold options.
	if options != nil && options.ImmutabilityPolicyExpiry != nil {
		req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)}
	}
	if options != nil && options.ImmutabilityPolicyMode != nil {
		req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
	}
	if options != nil && options.LegalHold != nil {
		req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	// The block list itself travels in the body as XML.
	return req, runtime.MarshalAsXML(req, blocks)
}
// commitBlockListHandleResponse handles the CommitBlockList response.
// It copies the relevant response headers into a
// BlockBlobClientCommitBlockListResponse, parsing typed values (RFC 1123
// dates, base64 MD5/CRC64, booleans) and returning the first parse error
// encountered. Headers absent from the response leave the field nil.
func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response) (BlockBlobClientCommitBlockListResponse, error) {
	result := BlockBlobClientCommitBlockListResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlockBlobClientCommitBlockListResponse{}, err
		}
		result.LastModified = &lastModified
	}
	// Both checksums are transmitted base64-encoded.
	if val := resp.Header.Get("Content-MD5"); val != "" {
		contentMD5, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return BlockBlobClientCommitBlockListResponse{}, err
		}
		result.ContentMD5 = contentMD5
	}
	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
		xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return BlockBlobClientCommitBlockListResponse{}, err
		}
		result.XMSContentCRC64 = xMSContentCRC64
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("x-ms-version-id"); val != "" {
		result.VersionID = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlockBlobClientCommitBlockListResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return BlockBlobClientCommitBlockListResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
		result.EncryptionKeySHA256 = &val
	}
	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
		result.EncryptionScope = &val
	}
	return result, nil
}
// GetBlockList - The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob
// If the operation fails it returns an *azcore.ResponseError type.
// Generated from API version 2020-10-02
// listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together.
// options - BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method.
// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, options *BlockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientGetBlockListResponse, error) {
	// Build the request, send it through the pipeline, and only accept 200 OK.
	request, err := client.getBlockListCreateRequest(ctx, listType, options, leaseAccessConditions, modifiedAccessConditions)
	if err != nil {
		return BlockBlobClientGetBlockListResponse{}, err
	}
	httpResp, err := client.pl.Do(request)
	if err != nil {
		return BlockBlobClientGetBlockListResponse{}, err
	}
	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
		return BlockBlobClientGetBlockListResponse{}, runtime.NewResponseError(httpResp)
	}
	return client.getBlockListHandleResponse(httpResp)
}
// getBlockListCreateRequest creates the GetBlockList request.
// It issues a GET with comp=blocklist, encoding the requested list type,
// optional snapshot, and timeout as query parameters, and mapping lease and
// tag conditions onto request headers.
func (client *BlockBlobClient) getBlockListCreateRequest(ctx context.Context, listType BlockListType, options *BlockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
	if err != nil {
		return nil, err
	}
	// comp=blocklist selects the Get Block List operation;
	// blocklisttype chooses committed, uncommitted, or both lists.
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "blocklist")
	if options != nil && options.Snapshot != nil {
		reqQP.Set("snapshot", *options.Snapshot)
	}
	reqQP.Set("blocklisttype", string(listType))
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	// Service version this client was generated against.
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}
// getBlockListHandleResponse handles the GetBlockList response.
// It copies the relevant response headers into a
// BlockBlobClientGetBlockListResponse (parsing dates and the blob content
// length) and then unmarshals the XML body into result.BlockList.
func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) (BlockBlobClientGetBlockListResponse, error) {
	result := BlockBlobClientGetBlockListResponse{}
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlockBlobClientGetBlockListResponse{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	if val := resp.Header.Get("Content-Type"); val != "" {
		result.ContentType = &val
	}
	// Size of the whole blob, not of this response body.
	if val := resp.Header.Get("x-ms-blob-content-length"); val != "" {
		blobContentLength, err := strconv.ParseInt(val, 10, 64)
		if err != nil {
			return BlockBlobClientGetBlockListResponse{}, err
		}
		result.BlobContentLength = &blobContentLength
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlockBlobClientGetBlockListResponse{}, err
		}
		result.Date = &date
	}
	// The block list itself arrives as the XML response body.
	if err := runtime.UnmarshalAsXML(resp, &result.BlockList); err != nil {
		return BlockBlobClientGetBlockListResponse{}, err
	}
	return result, nil
}
// PutBlobFromURL - The Put Blob from URL operation creates a new Block Blob where the contents of the blob are read from
// a given URL. This API is supported beginning with the 2020-04-08 version. Partial updates are not
// supported with Put Blob from URL; the content of an existing blob is overwritten with the content of the new blob. To perform
// partial updates to a block blobs contents using a source URL, use the Put
// Block from URL API in conjunction with Put Block List.
// If the operation fails it returns an *azcore.ResponseError type.
// Generated from API version 2020-10-02
// contentLength - The length of the request.
// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
// a page blob snapshot. The value should be URL-encoded as it would appear in a request
// URI. The source blob must either be public or must be authenticated via a shared access signature.
// options - BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL
// method.
// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method.
// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method.
// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL
// method.
func (client *BlockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientPutBlobFromURLResponse, error) {
	// Build the request, send it through the pipeline, and only accept 201 Created.
	request, err := client.putBlobFromURLCreateRequest(ctx, contentLength, copySource, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, sourceModifiedAccessConditions)
	if err != nil {
		return BlockBlobClientPutBlobFromURLResponse{}, err
	}
	httpResp, err := client.pl.Do(request)
	if err != nil {
		return BlockBlobClientPutBlobFromURLResponse{}, err
	}
	if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
		return BlockBlobClientPutBlobFromURLResponse{}, runtime.NewResponseError(httpResp)
	}
	return client.putBlobFromURLHandleResponse(httpResp)
}
// putBlobFromURLCreateRequest creates the PutBlobFromURL request.
//
// It builds a bodiless PUT against the client endpoint: the service reads the
// blob content from the copySource URL (sent in x-ms-copy-source). Each
// optional parameter group is mapped onto the corresponding query parameter
// or x-ms-* / conditional HTTP header, and a header is only emitted when its
// source field is non-nil.
func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	// Optional operation timeout travels as the "timeout" query parameter.
	reqQP := req.Raw().URL.Query()
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"}
	// Binary MD5/CRC values are transmitted base64-encoded per the REST spec.
	if options != nil && options.TransactionalContentMD5 != nil {
		req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
	}
	req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
	// Standard blob HTTP headers (content type/encoding/language/MD5/cache-control).
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
		req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
		req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil {
		req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
		req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
		req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl}
	}
	// User-defined metadata is sent as one x-ms-meta-<key> header per entry.
	if options != nil && options.Metadata != nil {
		for k, v := range options.Metadata {
			req.Raw().Header["x-ms-meta-"+k] = []string{v}
		}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
		req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
	}
	// Customer-provided encryption key (CPK) and encryption-scope headers.
	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
	}
	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
	}
	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
	}
	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
		req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
	}
	if options != nil && options.Tier != nil {
		req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
	}
	// Conditional headers evaluated against the destination blob; dates use RFC 1123.
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	// Conditional headers evaluated against the copy source blob.
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
		req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
		req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
		req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
		req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil {
		req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags}
	}
	// Service API version is pinned by the code generator.
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	if options != nil && options.SourceContentMD5 != nil {
		req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)}
	}
	if options != nil && options.BlobTagsString != nil {
		req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
	}
	// The source URL the service copies from; required for this operation.
	req.Raw().Header["x-ms-copy-source"] = []string{copySource}
	if options != nil && options.CopySourceBlobProperties != nil {
		req.Raw().Header["x-ms-copy-source-blob-properties"] = []string{strconv.FormatBool(*options.CopySourceBlobProperties)}
	}
	if options != nil && options.CopySourceAuthorization != nil {
		req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}
// putBlobFromURLHandleResponse handles the PutBlobFromURL response.
//
// It copies the response headers into the typed result struct. Missing
// headers leave the corresponding fields nil; a header that is present but
// unparseable (bad base64, date, or bool) fails the whole call.
func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) (BlockBlobClientPutBlobFromURLResponse, error) {
	result := BlockBlobClientPutBlobFromURLResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	// Date headers are in RFC 1123 format.
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlockBlobClientPutBlobFromURLResponse{}, err
		}
		result.LastModified = &lastModified
	}
	// Content-MD5 is base64-encoded on the wire; decode to raw bytes.
	if val := resp.Header.Get("Content-MD5"); val != "" {
		contentMD5, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return BlockBlobClientPutBlobFromURLResponse{}, err
		}
		result.ContentMD5 = contentMD5
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("x-ms-version-id"); val != "" {
		result.VersionID = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlockBlobClientPutBlobFromURLResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return BlockBlobClientPutBlobFromURLResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
		result.EncryptionKeySHA256 = &val
	}
	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
		result.EncryptionScope = &val
	}
	return result, nil
}
// StageBlock - The Stage Block operation creates a new block to be committed as part of a blob.
// If the operation fails it returns an *azcore.ResponseError type.
// Generated from API version 2020-10-02
// blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal
// to 64 bytes in size. For a given blob, the length of the value specified for the blockid
// parameter must be the same size for each block.
// contentLength - The length of the request.
// body - Initial data
// options - BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method.
// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method.
// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
func (client *BlockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (BlockBlobClientStageBlockResponse, error) {
	// Build the request, run it through the pipeline, and accept only 201 Created.
	request, err := client.stageBlockCreateRequest(ctx, blockID, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo)
	if err != nil {
		return BlockBlobClientStageBlockResponse{}, err
	}
	response, err := client.pl.Do(request)
	switch {
	case err != nil:
		return BlockBlobClientStageBlockResponse{}, err
	case !runtime.HasStatusCode(response, http.StatusCreated):
		// Any other status is surfaced as an *azcore.ResponseError.
		return BlockBlobClientStageBlockResponse{}, runtime.NewResponseError(response)
	}
	return client.stageBlockHandleResponse(response)
}
// stageBlockCreateRequest creates the StageBlock request.
//
// It builds a PUT with query parameters comp=block and blockid=<blockID>,
// copies the optional transactional checksums, lease and CPK parameters into
// the matching x-ms-* headers, and attaches body as the uploaded block data.
func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	// comp=block selects the Put Block operation; blockid identifies the block.
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "block")
	reqQP.Set("blockid", blockID)
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
	// Optional transactional checksums are sent base64-encoded.
	if options != nil && options.TransactionalContentMD5 != nil {
		req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
	}
	if options != nil && options.TransactionalContentCRC64 != nil {
		req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	// Customer-provided encryption key (CPK) and encryption-scope headers.
	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
	}
	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
	}
	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
	}
	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
		req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
	}
	// Service API version is pinned by the code generator.
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	// SetBody attaches the block payload and may fail; its error is returned as-is.
	return req, req.SetBody(body, "application/octet-stream")
}
// stageBlockHandleResponse handles the StageBlock response.
//
// It copies the response headers into the typed result struct. Missing
// headers leave the corresponding fields nil; a header that is present but
// unparseable (bad base64, date, or bool) fails the whole call.
func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (BlockBlobClientStageBlockResponse, error) {
	result := BlockBlobClientStageBlockResponse{}
	// Checksum headers are base64-encoded on the wire; decode to raw bytes.
	if val := resp.Header.Get("Content-MD5"); val != "" {
		contentMD5, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return BlockBlobClientStageBlockResponse{}, err
		}
		result.ContentMD5 = contentMD5
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	// Date headers are in RFC 1123 format.
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlockBlobClientStageBlockResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
		xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return BlockBlobClientStageBlockResponse{}, err
		}
		result.XMSContentCRC64 = xMSContentCRC64
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return BlockBlobClientStageBlockResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
		result.EncryptionKeySHA256 = &val
	}
	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
		result.EncryptionScope = &val
	}
	return result, nil
}
// StageBlockFromURL - The Stage Block operation creates a new block to be committed as part of a blob where the contents
// are read from a URL.
// If the operation fails it returns an *azcore.ResponseError type.
// Generated from API version 2020-10-02
// blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal
// to 64 bytes in size. For a given blob, the length of the value specified for the blockid
// parameter must be the same size for each block.
// contentLength - The length of the request.
// sourceURL - Specify a URL to the copy source.
// options - BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL
// method.
// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method.
// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL
// method.
func (client *BlockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientStageBlockFromURLResponse, error) {
	// Build the request, run it through the pipeline, and accept only 201 Created.
	request, err := client.stageBlockFromURLCreateRequest(ctx, blockID, contentLength, sourceURL, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions)
	if err != nil {
		return BlockBlobClientStageBlockFromURLResponse{}, err
	}
	response, err := client.pl.Do(request)
	switch {
	case err != nil:
		return BlockBlobClientStageBlockFromURLResponse{}, err
	case !runtime.HasStatusCode(response, http.StatusCreated):
		// Any other status is surfaced as an *azcore.ResponseError.
		return BlockBlobClientStageBlockFromURLResponse{}, runtime.NewResponseError(response)
	}
	return client.stageBlockFromURLHandleResponse(response)
}
// stageBlockFromURLCreateRequest creates the StageBlockFromURL request.
//
// It builds a bodiless PUT with comp=block and blockid=<blockID>; the service
// reads the block contents from sourceURL (sent in x-ms-copy-source). Optional
// source checksums, CPK, lease, and source conditional parameters are copied
// into the matching x-ms-* headers; a header is only emitted when set.
func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	// comp=block selects the Put Block From URL operation; blockid identifies the block.
	reqQP := req.Raw().URL.Query()
	reqQP.Set("comp", "block")
	reqQP.Set("blockid", blockID)
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
	// The URL the service reads the block data from; required for this operation.
	req.Raw().Header["x-ms-copy-source"] = []string{sourceURL}
	if options != nil && options.SourceRange != nil {
		req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange}
	}
	// Optional source checksums are sent base64-encoded.
	if options != nil && options.SourceContentMD5 != nil {
		req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)}
	}
	if options != nil && options.SourceContentcrc64 != nil {
		req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)}
	}
	// Customer-provided encryption key (CPK) and encryption-scope headers.
	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
	}
	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
	}
	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
	}
	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
		req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	// Conditional headers evaluated against the copy source blob; dates use RFC 1123.
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
		req.Raw().Header["x-ms-source-if-modified-since"] = []string{sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
		req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
		req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}
	}
	if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
		req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)}
	}
	// Service API version is pinned by the code generator.
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	if options != nil && options.CopySourceAuthorization != nil {
		req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	return req, nil
}
// stageBlockFromURLHandleResponse handles the StageBlockFromURL response.
//
// It copies the response headers into the typed result struct. Missing
// headers leave the corresponding fields nil; a header that is present but
// unparseable (bad base64, date, or bool) fails the whole call.
func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Response) (BlockBlobClientStageBlockFromURLResponse, error) {
	result := BlockBlobClientStageBlockFromURLResponse{}
	// Checksum headers are base64-encoded on the wire; decode to raw bytes.
	if val := resp.Header.Get("Content-MD5"); val != "" {
		contentMD5, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return BlockBlobClientStageBlockFromURLResponse{}, err
		}
		result.ContentMD5 = contentMD5
	}
	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
		xMSContentCRC64, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return BlockBlobClientStageBlockFromURLResponse{}, err
		}
		result.XMSContentCRC64 = xMSContentCRC64
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	// Date headers are in RFC 1123 format.
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlockBlobClientStageBlockFromURLResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return BlockBlobClientStageBlockFromURLResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
		result.EncryptionKeySHA256 = &val
	}
	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
		result.EncryptionScope = &val
	}
	return result, nil
}
// Upload - The Upload Block Blob operation updates the content of an existing block blob. Updating an existing block blob
// overwrites any existing metadata on the blob. Partial updates are not supported with Put
// Blob; the content of the existing blob is overwritten with the content of the new blob. To perform a partial update of
// the content of a block blob, use the Put Block List operation.
// If the operation fails it returns an *azcore.ResponseError type.
// Generated from API version 2020-10-02
// contentLength - The length of the request.
// body - Initial data
// options - BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method.
// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method.
// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// CpkInfo - CpkInfo contains a group of parameters for the BlobClient.Download method.
// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *BlockBlobClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientUploadResponse, error) {
	// Build the request, run it through the pipeline, and accept only 201 Created.
	request, err := client.uploadCreateRequest(ctx, contentLength, body, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
	if err != nil {
		return BlockBlobClientUploadResponse{}, err
	}
	response, err := client.pl.Do(request)
	switch {
	case err != nil:
		return BlockBlobClientUploadResponse{}, err
	case !runtime.HasStatusCode(response, http.StatusCreated):
		// Any other status is surfaced as an *azcore.ResponseError.
		return BlockBlobClientUploadResponse{}, runtime.NewResponseError(response)
	}
	return client.uploadHandleResponse(response)
}
// uploadCreateRequest creates the Upload request.
//
// It builds a PUT against the client endpoint carrying body as the full blob
// content, mapping each optional parameter group onto the corresponding query
// parameter or x-ms-* / conditional HTTP header. A header is only emitted
// when its source field is non-nil.
func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
	if err != nil {
		return nil, err
	}
	// Optional operation timeout travels as the "timeout" query parameter.
	reqQP := req.Raw().URL.Query()
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.Raw().URL.RawQuery = reqQP.Encode()
	req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"}
	// Binary MD5 values are transmitted base64-encoded per the REST spec.
	if options != nil && options.TransactionalContentMD5 != nil {
		req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
	}
	req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
	// Standard blob HTTP headers (content type/encoding/language/MD5/cache-control).
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
		req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
		req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil {
		req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
		req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
		req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl}
	}
	// User-defined metadata is sent as one x-ms-meta-<key> header per entry.
	if options != nil && options.Metadata != nil {
		for k, v := range options.Metadata {
			req.Raw().Header["x-ms-meta-"+k] = []string{v}
		}
	}
	if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
		req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
	}
	if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
		req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
	}
	// Customer-provided encryption key (CPK) and encryption-scope headers.
	if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
		req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
	}
	if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
		req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
	}
	if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
		req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
	}
	if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
		req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
	}
	if options != nil && options.Tier != nil {
		req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
	}
	// Conditional headers evaluated against the destination blob; dates use RFC 1123.
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
		req.Raw().Header["If-Modified-Since"] = []string{modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
		req.Raw().Header["If-Unmodified-Since"] = []string{modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
		req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
		req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
	}
	if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
		req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
	}
	// Service API version is pinned by the code generator.
	req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
	if options != nil && options.RequestID != nil {
		req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
	}
	if options != nil && options.BlobTagsString != nil {
		req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
	}
	// Immutability policy and legal-hold settings.
	if options != nil && options.ImmutabilityPolicyExpiry != nil {
		req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{options.ImmutabilityPolicyExpiry.Format(time.RFC1123)}
	}
	if options != nil && options.ImmutabilityPolicyMode != nil {
		req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
	}
	if options != nil && options.LegalHold != nil {
		req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)}
	}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	// SetBody attaches the blob payload and may fail; its error is returned as-is.
	return req, req.SetBody(body, "application/octet-stream")
}
// uploadHandleResponse handles the Upload response.
//
// It copies the response headers into the typed result struct. Missing
// headers leave the corresponding fields nil; a header that is present but
// unparseable (bad base64, date, or bool) fails the whole call.
func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockBlobClientUploadResponse, error) {
	result := BlockBlobClientUploadResponse{}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = (*azcore.ETag)(&val)
	}
	// Date headers are in RFC 1123 format.
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlockBlobClientUploadResponse{}, err
		}
		result.LastModified = &lastModified
	}
	// Content-MD5 is base64-encoded on the wire; decode to raw bytes.
	if val := resp.Header.Get("Content-MD5"); val != "" {
		contentMD5, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return BlockBlobClientUploadResponse{}, err
		}
		result.ContentMD5 = contentMD5
	}
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("x-ms-version-id"); val != "" {
		result.VersionID = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return BlockBlobClientUploadResponse{}, err
		}
		result.Date = &date
	}
	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
		isServerEncrypted, err := strconv.ParseBool(val)
		if err != nil {
			return BlockBlobClientUploadResponse{}, err
		}
		result.IsServerEncrypted = &isServerEncrypted
	}
	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
		result.EncryptionKeySHA256 = &val
	}
	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
		result.EncryptionScope = &val
	}
	return result, nil
}

View file

@ -5,15 +5,10 @@
// Licensed under the MIT License. See License.txt in the project root for license information. // Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator. // Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated. // Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DO NOT EDIT.
package azblob package generated
const (
moduleName = "azblob"
moduleVersion = "v0.4.1"
)
// AccessTier enum
type AccessTier string type AccessTier string
const ( const (
@ -31,6 +26,7 @@ const (
AccessTierP60 AccessTier = "P60" AccessTierP60 AccessTier = "P60"
AccessTierP70 AccessTier = "P70" AccessTierP70 AccessTier = "P70"
AccessTierP80 AccessTier = "P80" AccessTierP80 AccessTier = "P80"
AccessTierPremium AccessTier = "Premium"
) )
// PossibleAccessTierValues returns the possible values for the AccessTier const type. // PossibleAccessTierValues returns the possible values for the AccessTier const type.
@ -50,15 +46,10 @@ func PossibleAccessTierValues() []AccessTier {
AccessTierP60, AccessTierP60,
AccessTierP70, AccessTierP70,
AccessTierP80, AccessTierP80,
AccessTierPremium,
} }
} }
// ToPtr returns a *AccessTier pointing to the current value.
func (c AccessTier) ToPtr() *AccessTier {
return &c
}
// AccountKind enum
type AccountKind string type AccountKind string
const ( const (
@ -80,12 +71,6 @@ func PossibleAccountKindValues() []AccountKind {
} }
} }
// ToPtr returns a *AccountKind pointing to the current value.
func (c AccountKind) ToPtr() *AccountKind {
return &c
}
// ArchiveStatus enum
type ArchiveStatus string type ArchiveStatus string
const ( const (
@ -101,36 +86,6 @@ func PossibleArchiveStatusValues() []ArchiveStatus {
} }
} }
// ToPtr returns a *ArchiveStatus pointing to the current value.
func (c ArchiveStatus) ToPtr() *ArchiveStatus {
return &c
}
// BlobExpiryOptions enum
type BlobExpiryOptions string
const (
BlobExpiryOptionsAbsolute BlobExpiryOptions = "Absolute"
BlobExpiryOptionsNeverExpire BlobExpiryOptions = "NeverExpire"
BlobExpiryOptionsRelativeToCreation BlobExpiryOptions = "RelativeToCreation"
BlobExpiryOptionsRelativeToNow BlobExpiryOptions = "RelativeToNow"
)
// PossibleBlobExpiryOptionsValues returns the possible values for the BlobExpiryOptions const type.
func PossibleBlobExpiryOptionsValues() []BlobExpiryOptions {
return []BlobExpiryOptions{
BlobExpiryOptionsAbsolute,
BlobExpiryOptionsNeverExpire,
BlobExpiryOptionsRelativeToCreation,
BlobExpiryOptionsRelativeToNow,
}
}
// ToPtr returns a *BlobExpiryOptions pointing to the current value.
func (c BlobExpiryOptions) ToPtr() *BlobExpiryOptions {
return &c
}
// BlobGeoReplicationStatus - The status of the secondary location // BlobGeoReplicationStatus - The status of the secondary location
type BlobGeoReplicationStatus string type BlobGeoReplicationStatus string
@ -149,35 +104,6 @@ func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus {
} }
} }
// ToPtr returns a *BlobGeoReplicationStatus pointing to the current value.
func (c BlobGeoReplicationStatus) ToPtr() *BlobGeoReplicationStatus {
return &c
}
// BlobImmutabilityPolicyMode enum
type BlobImmutabilityPolicyMode string
const (
BlobImmutabilityPolicyModeMutable BlobImmutabilityPolicyMode = "Mutable"
BlobImmutabilityPolicyModeUnlocked BlobImmutabilityPolicyMode = "Unlocked"
BlobImmutabilityPolicyModeLocked BlobImmutabilityPolicyMode = "Locked"
)
// PossibleBlobImmutabilityPolicyModeValues returns the possible values for the BlobImmutabilityPolicyMode const type.
func PossibleBlobImmutabilityPolicyModeValues() []BlobImmutabilityPolicyMode {
return []BlobImmutabilityPolicyMode{
BlobImmutabilityPolicyModeMutable,
BlobImmutabilityPolicyModeUnlocked,
BlobImmutabilityPolicyModeLocked,
}
}
// ToPtr returns a *BlobImmutabilityPolicyMode pointing to the current value.
func (c BlobImmutabilityPolicyMode) ToPtr() *BlobImmutabilityPolicyMode {
return &c
}
// BlobType enum
type BlobType string type BlobType string
const ( const (
@ -195,12 +121,6 @@ func PossibleBlobTypeValues() []BlobType {
} }
} }
// ToPtr returns a *BlobType pointing to the current value.
func (c BlobType) ToPtr() *BlobType {
return &c
}
// BlockListType enum
type BlockListType string type BlockListType string
const ( const (
@ -218,12 +138,6 @@ func PossibleBlockListTypeValues() []BlockListType {
} }
} }
// ToPtr returns a *BlockListType pointing to the current value.
func (c BlockListType) ToPtr() *BlockListType {
return &c
}
// CopyStatusType enum
type CopyStatusType string type CopyStatusType string
const ( const (
@ -243,12 +157,6 @@ func PossibleCopyStatusTypeValues() []CopyStatusType {
} }
} }
// ToPtr returns a *CopyStatusType pointing to the current value.
func (c CopyStatusType) ToPtr() *CopyStatusType {
return &c
}
// DeleteSnapshotsOptionType enum
type DeleteSnapshotsOptionType string type DeleteSnapshotsOptionType string
const ( const (
@ -264,12 +172,21 @@ func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType {
} }
} }
// ToPtr returns a *DeleteSnapshotsOptionType pointing to the current value. type DeleteType string
func (c DeleteSnapshotsOptionType) ToPtr() *DeleteSnapshotsOptionType {
return &c const (
DeleteTypeNone DeleteType = "None"
DeleteTypePermanent DeleteType = "Permanent"
)
// PossibleDeleteTypeValues returns the possible values for the DeleteType const type.
func PossibleDeleteTypeValues() []DeleteType {
return []DeleteType{
DeleteTypeNone,
DeleteTypePermanent,
}
} }
// EncryptionAlgorithmType enum
type EncryptionAlgorithmType string type EncryptionAlgorithmType string
const ( const (
@ -285,12 +202,57 @@ func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType {
} }
} }
// ToPtr returns a *EncryptionAlgorithmType pointing to the current value. type ExpiryOptions string
func (c EncryptionAlgorithmType) ToPtr() *EncryptionAlgorithmType {
return &c const (
ExpiryOptionsAbsolute ExpiryOptions = "Absolute"
ExpiryOptionsNeverExpire ExpiryOptions = "NeverExpire"
ExpiryOptionsRelativeToCreation ExpiryOptions = "RelativeToCreation"
ExpiryOptionsRelativeToNow ExpiryOptions = "RelativeToNow"
)
// PossibleExpiryOptionsValues returns the possible values for the ExpiryOptions const type.
func PossibleExpiryOptionsValues() []ExpiryOptions {
return []ExpiryOptions{
ExpiryOptionsAbsolute,
ExpiryOptionsNeverExpire,
ExpiryOptionsRelativeToCreation,
ExpiryOptionsRelativeToNow,
}
}
type ImmutabilityPolicyMode string
const (
ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = "Mutable"
ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = "Unlocked"
ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = "Locked"
)
// PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type.
func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode {
return []ImmutabilityPolicyMode{
ImmutabilityPolicyModeMutable,
ImmutabilityPolicyModeUnlocked,
ImmutabilityPolicyModeLocked,
}
}
type ImmutabilityPolicySetting string
const (
ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = "Unlocked"
ImmutabilityPolicySettingLocked ImmutabilityPolicySetting = "Locked"
)
// PossibleImmutabilityPolicySettingValues returns the possible values for the ImmutabilityPolicySetting const type.
func PossibleImmutabilityPolicySettingValues() []ImmutabilityPolicySetting {
return []ImmutabilityPolicySetting{
ImmutabilityPolicySettingUnlocked,
ImmutabilityPolicySettingLocked,
}
} }
// LeaseDurationType enum
type LeaseDurationType string type LeaseDurationType string
const ( const (
@ -306,12 +268,6 @@ func PossibleLeaseDurationTypeValues() []LeaseDurationType {
} }
} }
// ToPtr returns a *LeaseDurationType pointing to the current value.
func (c LeaseDurationType) ToPtr() *LeaseDurationType {
return &c
}
// LeaseStateType enum
type LeaseStateType string type LeaseStateType string
const ( const (
@ -333,12 +289,6 @@ func PossibleLeaseStateTypeValues() []LeaseStateType {
} }
} }
// ToPtr returns a *LeaseStateType pointing to the current value.
func (c LeaseStateType) ToPtr() *LeaseStateType {
return &c
}
// LeaseStatusType enum
type LeaseStatusType string type LeaseStatusType string
const ( const (
@ -354,12 +304,6 @@ func PossibleLeaseStatusTypeValues() []LeaseStatusType {
} }
} }
// ToPtr returns a *LeaseStatusType pointing to the current value.
func (c LeaseStatusType) ToPtr() *LeaseStatusType {
return &c
}
// ListBlobsIncludeItem enum
type ListBlobsIncludeItem string type ListBlobsIncludeItem string
const ( const (
@ -391,12 +335,6 @@ func PossibleListBlobsIncludeItemValues() []ListBlobsIncludeItem {
} }
} }
// ToPtr returns a *ListBlobsIncludeItem pointing to the current value.
func (c ListBlobsIncludeItem) ToPtr() *ListBlobsIncludeItem {
return &c
}
// ListContainersIncludeType enum
type ListContainersIncludeType string type ListContainersIncludeType string
const ( const (
@ -414,12 +352,6 @@ func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType {
} }
} }
// ToPtr returns a *ListContainersIncludeType pointing to the current value.
func (c ListContainersIncludeType) ToPtr() *ListContainersIncludeType {
return &c
}
// PremiumPageBlobAccessTier enum
type PremiumPageBlobAccessTier string type PremiumPageBlobAccessTier string
const ( const (
@ -453,12 +385,6 @@ func PossiblePremiumPageBlobAccessTierValues() []PremiumPageBlobAccessTier {
} }
} }
// ToPtr returns a *PremiumPageBlobAccessTier pointing to the current value.
func (c PremiumPageBlobAccessTier) ToPtr() *PremiumPageBlobAccessTier {
return &c
}
// PublicAccessType enum
type PublicAccessType string type PublicAccessType string
const ( const (
@ -474,11 +400,6 @@ func PossiblePublicAccessTypeValues() []PublicAccessType {
} }
} }
// ToPtr returns a *PublicAccessType pointing to the current value.
func (c PublicAccessType) ToPtr() *PublicAccessType {
return &c
}
// QueryFormatType - The quick query format type. // QueryFormatType - The quick query format type.
type QueryFormatType string type QueryFormatType string
@ -499,11 +420,6 @@ func PossibleQueryFormatTypeValues() []QueryFormatType {
} }
} }
// ToPtr returns a *QueryFormatType pointing to the current value.
func (c QueryFormatType) ToPtr() *QueryFormatType {
return &c
}
// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate. // RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate.
// Valid values are High and Standard. // Valid values are High and Standard.
type RehydratePriority string type RehydratePriority string
@ -521,12 +437,6 @@ func PossibleRehydratePriorityValues() []RehydratePriority {
} }
} }
// ToPtr returns a *RehydratePriority pointing to the current value.
func (c RehydratePriority) ToPtr() *RehydratePriority {
return &c
}
// SKUName enum
type SKUName string type SKUName string
const ( const (
@ -548,12 +458,6 @@ func PossibleSKUNameValues() []SKUName {
} }
} }
// ToPtr returns a *SKUName pointing to the current value.
func (c SKUName) ToPtr() *SKUName {
return &c
}
// SequenceNumberActionType enum
type SequenceNumberActionType string type SequenceNumberActionType string
const ( const (
@ -571,11 +475,6 @@ func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType {
} }
} }
// ToPtr returns a *SequenceNumberActionType pointing to the current value.
func (c SequenceNumberActionType) ToPtr() *SequenceNumberActionType {
return &c
}
// StorageErrorCode - Error codes returned by the service // StorageErrorCode - Error codes returned by the service
type StorageErrorCode string type StorageErrorCode string
@ -616,7 +515,7 @@ const (
StorageErrorCodeEmptyMetadataKey StorageErrorCode = "EmptyMetadataKey" StorageErrorCodeEmptyMetadataKey StorageErrorCode = "EmptyMetadataKey"
StorageErrorCodeFeatureVersionMismatch StorageErrorCode = "FeatureVersionMismatch" StorageErrorCodeFeatureVersionMismatch StorageErrorCode = "FeatureVersionMismatch"
StorageErrorCodeIncrementalCopyBlobMismatch StorageErrorCode = "IncrementalCopyBlobMismatch" StorageErrorCodeIncrementalCopyBlobMismatch StorageErrorCode = "IncrementalCopyBlobMismatch"
StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed StorageErrorCode = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed StorageErrorCode = "IncrementalCopyOfEarlierVersionSnapshotNotAllowed"
StorageErrorCodeIncrementalCopySourceMustBeSnapshot StorageErrorCode = "IncrementalCopySourceMustBeSnapshot" StorageErrorCodeIncrementalCopySourceMustBeSnapshot StorageErrorCode = "IncrementalCopySourceMustBeSnapshot"
StorageErrorCodeInfiniteLeaseDurationRequired StorageErrorCode = "InfiniteLeaseDurationRequired" StorageErrorCodeInfiniteLeaseDurationRequired StorageErrorCode = "InfiniteLeaseDurationRequired"
StorageErrorCodeInsufficientAccountPermissions StorageErrorCode = "InsufficientAccountPermissions" StorageErrorCodeInsufficientAccountPermissions StorageErrorCode = "InsufficientAccountPermissions"
@ -734,7 +633,7 @@ func PossibleStorageErrorCodeValues() []StorageErrorCode {
StorageErrorCodeEmptyMetadataKey, StorageErrorCodeEmptyMetadataKey,
StorageErrorCodeFeatureVersionMismatch, StorageErrorCodeFeatureVersionMismatch,
StorageErrorCodeIncrementalCopyBlobMismatch, StorageErrorCodeIncrementalCopyBlobMismatch,
StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed, StorageErrorCodeIncrementalCopyOfEarlierVersionSnapshotNotAllowed,
StorageErrorCodeIncrementalCopySourceMustBeSnapshot, StorageErrorCodeIncrementalCopySourceMustBeSnapshot,
StorageErrorCodeInfiniteLeaseDurationRequired, StorageErrorCodeInfiniteLeaseDurationRequired,
StorageErrorCodeInsufficientAccountPermissions, StorageErrorCodeInsufficientAccountPermissions,
@ -813,29 +712,3 @@ func PossibleStorageErrorCodeValues() []StorageErrorCode {
StorageErrorCodeUnsupportedXMLNode, StorageErrorCodeUnsupportedXMLNode,
} }
} }
// ToPtr returns a *StorageErrorCode pointing to the current value.
func (c StorageErrorCode) ToPtr() *StorageErrorCode {
return &c
}
// BlobDeleteType enum
type BlobDeleteType string
const (
BlobDeleteTypeNone BlobDeleteType = "None"
BlobDeleteTypePermanent BlobDeleteType = "Permanent"
)
// PossibleBlobDeleteTypeValues returns the possible values for the BlobDeleteType const type.
func PossibleBlobDeleteTypeValues() []BlobDeleteType {
return []BlobDeleteType{
BlobDeleteTypeNone,
BlobDeleteTypePermanent,
}
}
// ToPtr returns a *BlobDeleteType pointing to the current value.
func (c BlobDeleteType) ToPtr() *BlobDeleteType {
return &c
}

View file

@ -0,0 +1,481 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DO NOT EDIT.
package generated
import (
"encoding/json"
"encoding/xml"
"fmt"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"reflect"
"time"
)
// MarshalXML implements the xml.Marshaller interface for type AccessPolicy.
func (a AccessPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type alias AccessPolicy
aux := &struct {
*alias
Expiry *timeRFC3339 `xml:"Expiry"`
Start *timeRFC3339 `xml:"Start"`
}{
alias: (*alias)(&a),
Expiry: (*timeRFC3339)(a.Expiry),
Start: (*timeRFC3339)(a.Start),
}
return e.EncodeElement(aux, start)
}
// UnmarshalXML implements the xml.Unmarshaller interface for type AccessPolicy.
func (a *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
type alias AccessPolicy
aux := &struct {
*alias
Expiry *timeRFC3339 `xml:"Expiry"`
Start *timeRFC3339 `xml:"Start"`
}{
alias: (*alias)(a),
}
if err := d.DecodeElement(aux, &start); err != nil {
return err
}
a.Expiry = (*time.Time)(aux.Expiry)
a.Start = (*time.Time)(aux.Start)
return nil
}
// MarshalXML implements the xml.Marshaller interface for type ArrowConfiguration.
func (a ArrowConfiguration) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type alias ArrowConfiguration
aux := &struct {
*alias
Schema *[]*ArrowField `xml:"Schema>Field"`
}{
alias: (*alias)(&a),
}
if a.Schema != nil {
aux.Schema = &a.Schema
}
return e.EncodeElement(aux, start)
}
// MarshalXML implements the xml.Marshaller interface for type BlobFlatListSegment.
func (b BlobFlatListSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type alias BlobFlatListSegment
aux := &struct {
*alias
BlobItems *[]*BlobItemInternal `xml:"Blob"`
}{
alias: (*alias)(&b),
}
if b.BlobItems != nil {
aux.BlobItems = &b.BlobItems
}
return e.EncodeElement(aux, start)
}
// MarshalXML implements the xml.Marshaller interface for type BlobHierarchyListSegment.
func (b BlobHierarchyListSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type alias BlobHierarchyListSegment
aux := &struct {
*alias
BlobItems *[]*BlobItemInternal `xml:"Blob"`
BlobPrefixes *[]*BlobPrefix `xml:"BlobPrefix"`
}{
alias: (*alias)(&b),
}
if b.BlobItems != nil {
aux.BlobItems = &b.BlobItems
}
if b.BlobPrefixes != nil {
aux.BlobPrefixes = &b.BlobPrefixes
}
return e.EncodeElement(aux, start)
}
// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItemInternal.
func (b *BlobItemInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
type alias BlobItemInternal
aux := &struct {
*alias
Metadata additionalProperties `xml:"Metadata"`
OrMetadata additionalProperties `xml:"OrMetadata"`
}{
alias: (*alias)(b),
}
if err := d.DecodeElement(aux, &start); err != nil {
return err
}
b.Metadata = (map[string]*string)(aux.Metadata)
b.OrMetadata = (map[string]*string)(aux.OrMetadata)
return nil
}
// MarshalXML implements the xml.Marshaller interface for type BlobPropertiesInternal.
func (b BlobPropertiesInternal) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type alias BlobPropertiesInternal
aux := &struct {
*alias
AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"`
ContentMD5 *string `xml:"Content-MD5"`
CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"`
CreationTime *timeRFC1123 `xml:"Creation-Time"`
DeletedTime *timeRFC1123 `xml:"DeletedTime"`
ExpiresOn *timeRFC1123 `xml:"Expiry-Time"`
ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"`
LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"`
LastModified *timeRFC1123 `xml:"Last-Modified"`
}{
alias: (*alias)(&b),
AccessTierChangeTime: (*timeRFC1123)(b.AccessTierChangeTime),
CopyCompletionTime: (*timeRFC1123)(b.CopyCompletionTime),
CreationTime: (*timeRFC1123)(b.CreationTime),
DeletedTime: (*timeRFC1123)(b.DeletedTime),
ExpiresOn: (*timeRFC1123)(b.ExpiresOn),
ImmutabilityPolicyExpiresOn: (*timeRFC1123)(b.ImmutabilityPolicyExpiresOn),
LastAccessedOn: (*timeRFC1123)(b.LastAccessedOn),
LastModified: (*timeRFC1123)(b.LastModified),
}
if b.ContentMD5 != nil {
encodedContentMD5 := runtime.EncodeByteArray(b.ContentMD5, runtime.Base64StdFormat)
aux.ContentMD5 = &encodedContentMD5
}
return e.EncodeElement(aux, start)
}
// UnmarshalXML implements the xml.Unmarshaller interface for type BlobPropertiesInternal.
func (b *BlobPropertiesInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
type alias BlobPropertiesInternal
aux := &struct {
*alias
AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"`
ContentMD5 *string `xml:"Content-MD5"`
CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"`
CreationTime *timeRFC1123 `xml:"Creation-Time"`
DeletedTime *timeRFC1123 `xml:"DeletedTime"`
ExpiresOn *timeRFC1123 `xml:"Expiry-Time"`
ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"`
LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"`
LastModified *timeRFC1123 `xml:"Last-Modified"`
}{
alias: (*alias)(b),
}
if err := d.DecodeElement(aux, &start); err != nil {
return err
}
b.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime)
if aux.ContentMD5 != nil {
if err := runtime.DecodeByteArray(*aux.ContentMD5, &b.ContentMD5, runtime.Base64StdFormat); err != nil {
return err
}
}
b.CopyCompletionTime = (*time.Time)(aux.CopyCompletionTime)
b.CreationTime = (*time.Time)(aux.CreationTime)
b.DeletedTime = (*time.Time)(aux.DeletedTime)
b.ExpiresOn = (*time.Time)(aux.ExpiresOn)
b.ImmutabilityPolicyExpiresOn = (*time.Time)(aux.ImmutabilityPolicyExpiresOn)
b.LastAccessedOn = (*time.Time)(aux.LastAccessedOn)
b.LastModified = (*time.Time)(aux.LastModified)
return nil
}
// MarshalXML implements the xml.Marshaller interface for type BlobTags.
func (b BlobTags) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
start.Name.Local = "Tags"
type alias BlobTags
aux := &struct {
*alias
BlobTagSet *[]*BlobTag `xml:"TagSet>Tag"`
}{
alias: (*alias)(&b),
}
if b.BlobTagSet != nil {
aux.BlobTagSet = &b.BlobTagSet
}
return e.EncodeElement(aux, start)
}
// MarshalXML implements the xml.Marshaller interface for type BlockList.
func (b BlockList) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type alias BlockList
aux := &struct {
*alias
CommittedBlocks *[]*Block `xml:"CommittedBlocks>Block"`
UncommittedBlocks *[]*Block `xml:"UncommittedBlocks>Block"`
}{
alias: (*alias)(&b),
}
if b.CommittedBlocks != nil {
aux.CommittedBlocks = &b.CommittedBlocks
}
if b.UncommittedBlocks != nil {
aux.UncommittedBlocks = &b.UncommittedBlocks
}
return e.EncodeElement(aux, start)
}
// MarshalXML implements the xml.Marshaller interface for type BlockLookupList.
func (b BlockLookupList) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
start.Name.Local = "BlockList"
type alias BlockLookupList
aux := &struct {
*alias
Committed *[]*string `xml:"Committed"`
Latest *[]*string `xml:"Latest"`
Uncommitted *[]*string `xml:"Uncommitted"`
}{
alias: (*alias)(&b),
}
if b.Committed != nil {
aux.Committed = &b.Committed
}
if b.Latest != nil {
aux.Latest = &b.Latest
}
if b.Uncommitted != nil {
aux.Uncommitted = &b.Uncommitted
}
return e.EncodeElement(aux, start)
}
// UnmarshalXML implements the xml.Unmarshaller interface for type ContainerItem.
func (c *ContainerItem) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
type alias ContainerItem
aux := &struct {
*alias
Metadata additionalProperties `xml:"Metadata"`
}{
alias: (*alias)(c),
}
if err := d.DecodeElement(aux, &start); err != nil {
return err
}
c.Metadata = (map[string]*string)(aux.Metadata)
return nil
}
// MarshalXML implements the xml.Marshaller interface for type ContainerProperties.
func (c ContainerProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type alias ContainerProperties
aux := &struct {
*alias
DeletedTime *timeRFC1123 `xml:"DeletedTime"`
LastModified *timeRFC1123 `xml:"Last-Modified"`
}{
alias: (*alias)(&c),
DeletedTime: (*timeRFC1123)(c.DeletedTime),
LastModified: (*timeRFC1123)(c.LastModified),
}
return e.EncodeElement(aux, start)
}
// UnmarshalXML implements the xml.Unmarshaller interface for type ContainerProperties.
func (c *ContainerProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
type alias ContainerProperties
aux := &struct {
*alias
DeletedTime *timeRFC1123 `xml:"DeletedTime"`
LastModified *timeRFC1123 `xml:"Last-Modified"`
}{
alias: (*alias)(c),
}
if err := d.DecodeElement(aux, &start); err != nil {
return err
}
c.DeletedTime = (*time.Time)(aux.DeletedTime)
c.LastModified = (*time.Time)(aux.LastModified)
return nil
}
// MarshalXML implements the xml.Marshaller interface for type FilterBlobSegment.
func (f FilterBlobSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type alias FilterBlobSegment
aux := &struct {
*alias
Blobs *[]*FilterBlobItem `xml:"Blobs>Blob"`
}{
alias: (*alias)(&f),
}
if f.Blobs != nil {
aux.Blobs = &f.Blobs
}
return e.EncodeElement(aux, start)
}
// MarshalXML implements the xml.Marshaller interface for type GeoReplication.
func (g GeoReplication) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type alias GeoReplication
aux := &struct {
*alias
LastSyncTime *timeRFC1123 `xml:"LastSyncTime"`
}{
alias: (*alias)(&g),
LastSyncTime: (*timeRFC1123)(g.LastSyncTime),
}
return e.EncodeElement(aux, start)
}
// UnmarshalXML implements the xml.Unmarshaller interface for type GeoReplication.
func (g *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
type alias GeoReplication
aux := &struct {
*alias
LastSyncTime *timeRFC1123 `xml:"LastSyncTime"`
}{
alias: (*alias)(g),
}
if err := d.DecodeElement(aux, &start); err != nil {
return err
}
g.LastSyncTime = (*time.Time)(aux.LastSyncTime)
return nil
}
// MarshalXML implements the xml.Marshaller interface for type ListContainersSegmentResponse.
func (l ListContainersSegmentResponse) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type alias ListContainersSegmentResponse
aux := &struct {
*alias
ContainerItems *[]*ContainerItem `xml:"Containers>Container"`
}{
alias: (*alias)(&l),
}
if l.ContainerItems != nil {
aux.ContainerItems = &l.ContainerItems
}
return e.EncodeElement(aux, start)
}
// MarshalXML implements the xml.Marshaller interface for type PageList.
func (p PageList) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type alias PageList
aux := &struct {
*alias
ClearRange *[]*ClearRange `xml:"ClearRange"`
PageRange *[]*PageRange `xml:"PageRange"`
}{
alias: (*alias)(&p),
}
if p.ClearRange != nil {
aux.ClearRange = &p.ClearRange
}
if p.PageRange != nil {
aux.PageRange = &p.PageRange
}
return e.EncodeElement(aux, start)
}
// MarshalXML implements the xml.Marshaller interface for type QueryRequest.
func (q QueryRequest) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
start.Name.Local = "QueryRequest"
type alias QueryRequest
aux := &struct {
*alias
}{
alias: (*alias)(&q),
}
return e.EncodeElement(aux, start)
}
// MarshalJSON implements the json.Marshaller interface for type StorageError.
func (s StorageError) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
populate(objectMap, "Message", s.Message)
return json.Marshal(objectMap)
}
// UnmarshalJSON implements the json.Unmarshaller interface for type StorageError.
func (s *StorageError) UnmarshalJSON(data []byte) error {
var rawMsg map[string]json.RawMessage
if err := json.Unmarshal(data, &rawMsg); err != nil {
return fmt.Errorf("unmarshalling type %T: %v", s, err)
}
for key, val := range rawMsg {
var err error
switch key {
case "Message":
err = unpopulate(val, "Message", &s.Message)
delete(rawMsg, key)
}
if err != nil {
return fmt.Errorf("unmarshalling type %T: %v", s, err)
}
}
return nil
}
// MarshalXML implements the xml.Marshaller interface for type StorageServiceProperties.
func (s StorageServiceProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type alias StorageServiceProperties
aux := &struct {
*alias
Cors *[]*CorsRule `xml:"Cors>CorsRule"`
}{
alias: (*alias)(&s),
}
if s.Cors != nil {
aux.Cors = &s.Cors
}
return e.EncodeElement(aux, start)
}
// MarshalXML implements the xml.Marshaller interface for type UserDelegationKey.
func (u UserDelegationKey) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
type alias UserDelegationKey
aux := &struct {
*alias
SignedExpiry *timeRFC3339 `xml:"SignedExpiry"`
SignedStart *timeRFC3339 `xml:"SignedStart"`
}{
alias: (*alias)(&u),
SignedExpiry: (*timeRFC3339)(u.SignedExpiry),
SignedStart: (*timeRFC3339)(u.SignedStart),
}
return e.EncodeElement(aux, start)
}
// UnmarshalXML implements the xml.Unmarshaller interface for type UserDelegationKey.
func (u *UserDelegationKey) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
type alias UserDelegationKey
aux := &struct {
*alias
SignedExpiry *timeRFC3339 `xml:"SignedExpiry"`
SignedStart *timeRFC3339 `xml:"SignedStart"`
}{
alias: (*alias)(u),
}
if err := d.DecodeElement(aux, &start); err != nil {
return err
}
u.SignedExpiry = (*time.Time)(aux.SignedExpiry)
u.SignedStart = (*time.Time)(aux.SignedStart)
return nil
}
func populate(m map[string]interface{}, k string, v interface{}) {
if v == nil {
return
} else if azcore.IsNullValue(v) {
m[k] = nil
} else if !reflect.ValueOf(v).IsNil() {
m[k] = v
}
}
func unpopulate(data json.RawMessage, fn string, v interface{}) error {
if data == nil {
return nil
}
if err := json.Unmarshal(data, v); err != nil {
return fmt.Errorf("struct field %s: %v", fn, err)
}
return nil
}

File diff suppressed because it is too large Load diff

View file

@ -5,8 +5,9 @@
// Licensed under the MIT License. See License.txt in the project root for license information. // Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator. // Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated. // Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DO NOT EDIT.
package azblob package generated
import ( import (
"context" "context"
@ -20,16 +21,18 @@ import (
"time" "time"
) )
type serviceClient struct { // ServiceClient contains the methods for the Service group.
// Don't use this type directly, use NewServiceClient() instead.
type ServiceClient struct {
endpoint string endpoint string
pl runtime.Pipeline pl runtime.Pipeline
} }
// newServiceClient creates a new instance of serviceClient with the specified values. // NewServiceClient creates a new instance of ServiceClient with the specified values.
// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. // endpoint - The URL of the service account, container, or blob that is the target of the desired operation.
// pl - the pipeline used for sending requests and handling responses. // pl - the pipeline used for sending requests and handling responses.
func newServiceClient(endpoint string, pl runtime.Pipeline) *serviceClient { func NewServiceClient(endpoint string, pl runtime.Pipeline) *ServiceClient {
client := &serviceClient{ client := &ServiceClient{
endpoint: endpoint, endpoint: endpoint,
pl: pl, pl: pl,
} }
@ -40,24 +43,25 @@ func newServiceClient(endpoint string, pl runtime.Pipeline) *serviceClient {
// expression. Filter blobs searches across all containers within a storage account but can // expression. Filter blobs searches across all containers within a storage account but can
// be scoped within the expression to a single container. // be scoped within the expression to a single container.
// If the operation fails it returns an *azcore.ResponseError type. // If the operation fails it returns an *azcore.ResponseError type.
// options - serviceClientFilterBlobsOptions contains the optional parameters for the serviceClient.FilterBlobs method. // Generated from API version 2020-10-02
func (client *serviceClient) FilterBlobs(ctx context.Context, options *serviceClientFilterBlobsOptions) (serviceClientFilterBlobsResponse, error) { // options - ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method.
func (client *ServiceClient) FilterBlobs(ctx context.Context, options *ServiceClientFilterBlobsOptions) (ServiceClientFilterBlobsResponse, error) {
req, err := client.filterBlobsCreateRequest(ctx, options) req, err := client.filterBlobsCreateRequest(ctx, options)
if err != nil { if err != nil {
return serviceClientFilterBlobsResponse{}, err return ServiceClientFilterBlobsResponse{}, err
} }
resp, err := client.pl.Do(req) resp, err := client.pl.Do(req)
if err != nil { if err != nil {
return serviceClientFilterBlobsResponse{}, err return ServiceClientFilterBlobsResponse{}, err
} }
if !runtime.HasStatusCode(resp, http.StatusOK) { if !runtime.HasStatusCode(resp, http.StatusOK) {
return serviceClientFilterBlobsResponse{}, runtime.NewResponseError(resp) return ServiceClientFilterBlobsResponse{}, runtime.NewResponseError(resp)
} }
return client.filterBlobsHandleResponse(resp) return client.filterBlobsHandleResponse(resp)
} }
// filterBlobsCreateRequest creates the FilterBlobs request. // filterBlobsCreateRequest creates the FilterBlobs request.
func (client *serviceClient) filterBlobsCreateRequest(ctx context.Context, options *serviceClientFilterBlobsOptions) (*policy.Request, error) { func (client *ServiceClient) filterBlobsCreateRequest(ctx context.Context, options *ServiceClientFilterBlobsOptions) (*policy.Request, error) {
req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
if err != nil { if err != nil {
return nil, err return nil, err
@ -77,17 +81,17 @@ func (client *serviceClient) filterBlobsCreateRequest(ctx context.Context, optio
reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
} }
req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-version", "2020-10-02") req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
if options != nil && options.RequestID != nil { if options != nil && options.RequestID != nil {
req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
} }
req.Raw().Header.Set("Accept", "application/xml") req.Raw().Header["Accept"] = []string{"application/xml"}
return req, nil return req, nil
} }
// filterBlobsHandleResponse handles the FilterBlobs response. // filterBlobsHandleResponse handles the FilterBlobs response.
func (client *serviceClient) filterBlobsHandleResponse(resp *http.Response) (serviceClientFilterBlobsResponse, error) { func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (ServiceClientFilterBlobsResponse, error) {
result := serviceClientFilterBlobsResponse{RawResponse: resp} result := ServiceClientFilterBlobsResponse{}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" { if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val result.ClientRequestID = &val
} }
@ -100,36 +104,37 @@ func (client *serviceClient) filterBlobsHandleResponse(resp *http.Response) (ser
if val := resp.Header.Get("Date"); val != "" { if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val) date, err := time.Parse(time.RFC1123, val)
if err != nil { if err != nil {
return serviceClientFilterBlobsResponse{}, err return ServiceClientFilterBlobsResponse{}, err
} }
result.Date = &date result.Date = &date
} }
if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil { if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil {
return serviceClientFilterBlobsResponse{}, err return ServiceClientFilterBlobsResponse{}, err
} }
return result, nil return result, nil
} }
// GetAccountInfo - Returns the sku name and account kind // GetAccountInfo - Returns the sku name and account kind
// If the operation fails it returns an *azcore.ResponseError type. // If the operation fails it returns an *azcore.ResponseError type.
// options - serviceClientGetAccountInfoOptions contains the optional parameters for the serviceClient.GetAccountInfo method. // Generated from API version 2020-10-02
func (client *serviceClient) GetAccountInfo(ctx context.Context, options *serviceClientGetAccountInfoOptions) (serviceClientGetAccountInfoResponse, error) { // options - ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method.
func (client *ServiceClient) GetAccountInfo(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (ServiceClientGetAccountInfoResponse, error) {
req, err := client.getAccountInfoCreateRequest(ctx, options) req, err := client.getAccountInfoCreateRequest(ctx, options)
if err != nil { if err != nil {
return serviceClientGetAccountInfoResponse{}, err return ServiceClientGetAccountInfoResponse{}, err
} }
resp, err := client.pl.Do(req) resp, err := client.pl.Do(req)
if err != nil { if err != nil {
return serviceClientGetAccountInfoResponse{}, err return ServiceClientGetAccountInfoResponse{}, err
} }
if !runtime.HasStatusCode(resp, http.StatusOK) { if !runtime.HasStatusCode(resp, http.StatusOK) {
return serviceClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) return ServiceClientGetAccountInfoResponse{}, runtime.NewResponseError(resp)
} }
return client.getAccountInfoHandleResponse(resp) return client.getAccountInfoHandleResponse(resp)
} }
// getAccountInfoCreateRequest creates the GetAccountInfo request. // getAccountInfoCreateRequest creates the GetAccountInfo request.
func (client *serviceClient) getAccountInfoCreateRequest(ctx context.Context, options *serviceClientGetAccountInfoOptions) (*policy.Request, error) { func (client *ServiceClient) getAccountInfoCreateRequest(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (*policy.Request, error) {
req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
if err != nil { if err != nil {
return nil, err return nil, err
@ -138,14 +143,14 @@ func (client *serviceClient) getAccountInfoCreateRequest(ctx context.Context, op
reqQP.Set("restype", "account") reqQP.Set("restype", "account")
reqQP.Set("comp", "properties") reqQP.Set("comp", "properties")
req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-version", "2020-10-02") req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
req.Raw().Header.Set("Accept", "application/xml") req.Raw().Header["Accept"] = []string{"application/xml"}
return req, nil return req, nil
} }
// getAccountInfoHandleResponse handles the GetAccountInfo response. // getAccountInfoHandleResponse handles the GetAccountInfo response.
func (client *serviceClient) getAccountInfoHandleResponse(resp *http.Response) (serviceClientGetAccountInfoResponse, error) { func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) (ServiceClientGetAccountInfoResponse, error) {
result := serviceClientGetAccountInfoResponse{RawResponse: resp} result := ServiceClientGetAccountInfoResponse{}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" { if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val result.ClientRequestID = &val
} }
@ -158,7 +163,7 @@ func (client *serviceClient) getAccountInfoHandleResponse(resp *http.Response) (
if val := resp.Header.Get("Date"); val != "" { if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val) date, err := time.Parse(time.RFC1123, val)
if err != nil { if err != nil {
return serviceClientGetAccountInfoResponse{}, err return ServiceClientGetAccountInfoResponse{}, err
} }
result.Date = &date result.Date = &date
} }
@ -171,7 +176,7 @@ func (client *serviceClient) getAccountInfoHandleResponse(resp *http.Response) (
if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" { if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" {
isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val) isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val)
if err != nil { if err != nil {
return serviceClientGetAccountInfoResponse{}, err return ServiceClientGetAccountInfoResponse{}, err
} }
result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled
} }
@ -181,24 +186,25 @@ func (client *serviceClient) getAccountInfoHandleResponse(resp *http.Response) (
// GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics and // GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics and
// CORS (Cross-Origin Resource Sharing) rules. // CORS (Cross-Origin Resource Sharing) rules.
// If the operation fails it returns an *azcore.ResponseError type. // If the operation fails it returns an *azcore.ResponseError type.
// options - serviceClientGetPropertiesOptions contains the optional parameters for the serviceClient.GetProperties method. // Generated from API version 2020-10-02
func (client *serviceClient) GetProperties(ctx context.Context, options *serviceClientGetPropertiesOptions) (serviceClientGetPropertiesResponse, error) { // options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method.
func (client *ServiceClient) GetProperties(ctx context.Context, options *ServiceClientGetPropertiesOptions) (ServiceClientGetPropertiesResponse, error) {
req, err := client.getPropertiesCreateRequest(ctx, options) req, err := client.getPropertiesCreateRequest(ctx, options)
if err != nil { if err != nil {
return serviceClientGetPropertiesResponse{}, err return ServiceClientGetPropertiesResponse{}, err
} }
resp, err := client.pl.Do(req) resp, err := client.pl.Do(req)
if err != nil { if err != nil {
return serviceClientGetPropertiesResponse{}, err return ServiceClientGetPropertiesResponse{}, err
} }
if !runtime.HasStatusCode(resp, http.StatusOK) { if !runtime.HasStatusCode(resp, http.StatusOK) {
return serviceClientGetPropertiesResponse{}, runtime.NewResponseError(resp) return ServiceClientGetPropertiesResponse{}, runtime.NewResponseError(resp)
} }
return client.getPropertiesHandleResponse(resp) return client.getPropertiesHandleResponse(resp)
} }
// getPropertiesCreateRequest creates the GetProperties request. // getPropertiesCreateRequest creates the GetProperties request.
func (client *serviceClient) getPropertiesCreateRequest(ctx context.Context, options *serviceClientGetPropertiesOptions) (*policy.Request, error) { func (client *ServiceClient) getPropertiesCreateRequest(ctx context.Context, options *ServiceClientGetPropertiesOptions) (*policy.Request, error) {
req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
if err != nil { if err != nil {
return nil, err return nil, err
@ -210,17 +216,17 @@ func (client *serviceClient) getPropertiesCreateRequest(ctx context.Context, opt
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
} }
req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-version", "2020-10-02") req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
if options != nil && options.RequestID != nil { if options != nil && options.RequestID != nil {
req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
} }
req.Raw().Header.Set("Accept", "application/xml") req.Raw().Header["Accept"] = []string{"application/xml"}
return req, nil return req, nil
} }
// getPropertiesHandleResponse handles the GetProperties response. // getPropertiesHandleResponse handles the GetProperties response.
func (client *serviceClient) getPropertiesHandleResponse(resp *http.Response) (serviceClientGetPropertiesResponse, error) { func (client *ServiceClient) getPropertiesHandleResponse(resp *http.Response) (ServiceClientGetPropertiesResponse, error) {
result := serviceClientGetPropertiesResponse{RawResponse: resp} result := ServiceClientGetPropertiesResponse{}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" { if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val result.ClientRequestID = &val
} }
@ -231,7 +237,7 @@ func (client *serviceClient) getPropertiesHandleResponse(resp *http.Response) (s
result.Version = &val result.Version = &val
} }
if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceProperties); err != nil { if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceProperties); err != nil {
return serviceClientGetPropertiesResponse{}, err return ServiceClientGetPropertiesResponse{}, err
} }
return result, nil return result, nil
} }
@ -239,24 +245,25 @@ func (client *serviceClient) getPropertiesHandleResponse(resp *http.Response) (s
// GetStatistics - Retrieves statistics related to replication for the Blob service. It is only available on the secondary // GetStatistics - Retrieves statistics related to replication for the Blob service. It is only available on the secondary
// location endpoint when read-access geo-redundant replication is enabled for the storage account. // location endpoint when read-access geo-redundant replication is enabled for the storage account.
// If the operation fails it returns an *azcore.ResponseError type. // If the operation fails it returns an *azcore.ResponseError type.
// options - serviceClientGetStatisticsOptions contains the optional parameters for the serviceClient.GetStatistics method. // Generated from API version 2020-10-02
func (client *serviceClient) GetStatistics(ctx context.Context, options *serviceClientGetStatisticsOptions) (serviceClientGetStatisticsResponse, error) { // options - ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method.
func (client *ServiceClient) GetStatistics(ctx context.Context, options *ServiceClientGetStatisticsOptions) (ServiceClientGetStatisticsResponse, error) {
req, err := client.getStatisticsCreateRequest(ctx, options) req, err := client.getStatisticsCreateRequest(ctx, options)
if err != nil { if err != nil {
return serviceClientGetStatisticsResponse{}, err return ServiceClientGetStatisticsResponse{}, err
} }
resp, err := client.pl.Do(req) resp, err := client.pl.Do(req)
if err != nil { if err != nil {
return serviceClientGetStatisticsResponse{}, err return ServiceClientGetStatisticsResponse{}, err
} }
if !runtime.HasStatusCode(resp, http.StatusOK) { if !runtime.HasStatusCode(resp, http.StatusOK) {
return serviceClientGetStatisticsResponse{}, runtime.NewResponseError(resp) return ServiceClientGetStatisticsResponse{}, runtime.NewResponseError(resp)
} }
return client.getStatisticsHandleResponse(resp) return client.getStatisticsHandleResponse(resp)
} }
// getStatisticsCreateRequest creates the GetStatistics request. // getStatisticsCreateRequest creates the GetStatistics request.
func (client *serviceClient) getStatisticsCreateRequest(ctx context.Context, options *serviceClientGetStatisticsOptions) (*policy.Request, error) { func (client *ServiceClient) getStatisticsCreateRequest(ctx context.Context, options *ServiceClientGetStatisticsOptions) (*policy.Request, error) {
req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
if err != nil { if err != nil {
return nil, err return nil, err
@ -268,17 +275,17 @@ func (client *serviceClient) getStatisticsCreateRequest(ctx context.Context, opt
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
} }
req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-version", "2020-10-02") req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
if options != nil && options.RequestID != nil { if options != nil && options.RequestID != nil {
req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
} }
req.Raw().Header.Set("Accept", "application/xml") req.Raw().Header["Accept"] = []string{"application/xml"}
return req, nil return req, nil
} }
// getStatisticsHandleResponse handles the GetStatistics response. // getStatisticsHandleResponse handles the GetStatistics response.
func (client *serviceClient) getStatisticsHandleResponse(resp *http.Response) (serviceClientGetStatisticsResponse, error) { func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (ServiceClientGetStatisticsResponse, error) {
result := serviceClientGetStatisticsResponse{RawResponse: resp} result := ServiceClientGetStatisticsResponse{}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" { if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val result.ClientRequestID = &val
} }
@ -291,12 +298,12 @@ func (client *serviceClient) getStatisticsHandleResponse(resp *http.Response) (s
if val := resp.Header.Get("Date"); val != "" { if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val) date, err := time.Parse(time.RFC1123, val)
if err != nil { if err != nil {
return serviceClientGetStatisticsResponse{}, err return ServiceClientGetStatisticsResponse{}, err
} }
result.Date = &date result.Date = &date
} }
if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceStats); err != nil { if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceStats); err != nil {
return serviceClientGetStatisticsResponse{}, err return ServiceClientGetStatisticsResponse{}, err
} }
return result, nil return result, nil
} }
@ -304,26 +311,27 @@ func (client *serviceClient) getStatisticsHandleResponse(resp *http.Response) (s
// GetUserDelegationKey - Retrieves a user delegation key for the Blob service. This is only a valid operation when using // GetUserDelegationKey - Retrieves a user delegation key for the Blob service. This is only a valid operation when using
// bearer token authentication. // bearer token authentication.
// If the operation fails it returns an *azcore.ResponseError type. // If the operation fails it returns an *azcore.ResponseError type.
// Generated from API version 2020-10-02
// keyInfo - Key information // keyInfo - Key information
// options - serviceClientGetUserDelegationKeyOptions contains the optional parameters for the serviceClient.GetUserDelegationKey // options - ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey
// method. // method.
func (client *serviceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, options *serviceClientGetUserDelegationKeyOptions) (serviceClientGetUserDelegationKeyResponse, error) { func (client *ServiceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, options *ServiceClientGetUserDelegationKeyOptions) (ServiceClientGetUserDelegationKeyResponse, error) {
req, err := client.getUserDelegationKeyCreateRequest(ctx, keyInfo, options) req, err := client.getUserDelegationKeyCreateRequest(ctx, keyInfo, options)
if err != nil { if err != nil {
return serviceClientGetUserDelegationKeyResponse{}, err return ServiceClientGetUserDelegationKeyResponse{}, err
} }
resp, err := client.pl.Do(req) resp, err := client.pl.Do(req)
if err != nil { if err != nil {
return serviceClientGetUserDelegationKeyResponse{}, err return ServiceClientGetUserDelegationKeyResponse{}, err
} }
if !runtime.HasStatusCode(resp, http.StatusOK) { if !runtime.HasStatusCode(resp, http.StatusOK) {
return serviceClientGetUserDelegationKeyResponse{}, runtime.NewResponseError(resp) return ServiceClientGetUserDelegationKeyResponse{}, runtime.NewResponseError(resp)
} }
return client.getUserDelegationKeyHandleResponse(resp) return client.getUserDelegationKeyHandleResponse(resp)
} }
// getUserDelegationKeyCreateRequest creates the GetUserDelegationKey request. // getUserDelegationKeyCreateRequest creates the GetUserDelegationKey request.
func (client *serviceClient) getUserDelegationKeyCreateRequest(ctx context.Context, keyInfo KeyInfo, options *serviceClientGetUserDelegationKeyOptions) (*policy.Request, error) { func (client *ServiceClient) getUserDelegationKeyCreateRequest(ctx context.Context, keyInfo KeyInfo, options *ServiceClientGetUserDelegationKeyOptions) (*policy.Request, error) {
req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint)
if err != nil { if err != nil {
return nil, err return nil, err
@ -335,17 +343,17 @@ func (client *serviceClient) getUserDelegationKeyCreateRequest(ctx context.Conte
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
} }
req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-version", "2020-10-02") req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
if options != nil && options.RequestID != nil { if options != nil && options.RequestID != nil {
req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
} }
req.Raw().Header.Set("Accept", "application/xml") req.Raw().Header["Accept"] = []string{"application/xml"}
return req, runtime.MarshalAsXML(req, keyInfo) return req, runtime.MarshalAsXML(req, keyInfo)
} }
// getUserDelegationKeyHandleResponse handles the GetUserDelegationKey response. // getUserDelegationKeyHandleResponse handles the GetUserDelegationKey response.
func (client *serviceClient) getUserDelegationKeyHandleResponse(resp *http.Response) (serviceClientGetUserDelegationKeyResponse, error) { func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Response) (ServiceClientGetUserDelegationKeyResponse, error) {
result := serviceClientGetUserDelegationKeyResponse{RawResponse: resp} result := ServiceClientGetUserDelegationKeyResponse{}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" { if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val result.ClientRequestID = &val
} }
@ -358,34 +366,24 @@ func (client *serviceClient) getUserDelegationKeyHandleResponse(resp *http.Respo
if val := resp.Header.Get("Date"); val != "" { if val := resp.Header.Get("Date"); val != "" {
date, err := time.Parse(time.RFC1123, val) date, err := time.Parse(time.RFC1123, val)
if err != nil { if err != nil {
return serviceClientGetUserDelegationKeyResponse{}, err return ServiceClientGetUserDelegationKeyResponse{}, err
} }
result.Date = &date result.Date = &date
} }
if err := runtime.UnmarshalAsXML(resp, &result.UserDelegationKey); err != nil { if err := runtime.UnmarshalAsXML(resp, &result.UserDelegationKey); err != nil {
return serviceClientGetUserDelegationKeyResponse{}, err return ServiceClientGetUserDelegationKeyResponse{}, err
} }
return result, nil return result, nil
} }
// ListContainersSegment - The List Containers Segment operation returns a list of the containers under the specified account // NewListContainersSegmentPager - The List Containers Segment operation returns a list of the containers under the specified
// account
// If the operation fails it returns an *azcore.ResponseError type. // If the operation fails it returns an *azcore.ResponseError type.
// options - serviceClientListContainersSegmentOptions contains the optional parameters for the serviceClient.ListContainersSegment // Generated from API version 2020-10-02
// options - ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.ListContainersSegment
// method. // method.
func (client *serviceClient) ListContainersSegment(options *serviceClientListContainersSegmentOptions) *serviceClientListContainersSegmentPager {
return &serviceClientListContainersSegmentPager{
client: client,
requester: func(ctx context.Context) (*policy.Request, error) {
return client.listContainersSegmentCreateRequest(ctx, options)
},
advancer: func(ctx context.Context, resp serviceClientListContainersSegmentResponse) (*policy.Request, error) {
return runtime.NewRequest(ctx, http.MethodGet, *resp.ListContainersSegmentResponse.NextMarker)
},
}
}
// listContainersSegmentCreateRequest creates the ListContainersSegment request. // listContainersSegmentCreateRequest creates the ListContainersSegment request.
func (client *serviceClient) listContainersSegmentCreateRequest(ctx context.Context, options *serviceClientListContainersSegmentOptions) (*policy.Request, error) { func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Context, options *ServiceClientListContainersSegmentOptions) (*policy.Request, error) {
req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint)
if err != nil { if err != nil {
return nil, err return nil, err
@ -408,17 +406,17 @@ func (client *serviceClient) listContainersSegmentCreateRequest(ctx context.Cont
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
} }
req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-version", "2020-10-02") req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
if options != nil && options.RequestID != nil { if options != nil && options.RequestID != nil {
req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
} }
req.Raw().Header.Set("Accept", "application/xml") req.Raw().Header["Accept"] = []string{"application/xml"}
return req, nil return req, nil
} }
// listContainersSegmentHandleResponse handles the ListContainersSegment response. // listContainersSegmentHandleResponse handles the ListContainersSegment response.
func (client *serviceClient) listContainersSegmentHandleResponse(resp *http.Response) (serviceClientListContainersSegmentResponse, error) { func (client *ServiceClient) ListContainersSegmentHandleResponse(resp *http.Response) (ServiceClientListContainersSegmentResponse, error) {
result := serviceClientListContainersSegmentResponse{RawResponse: resp} result := ServiceClientListContainersSegmentResponse{}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" { if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val result.ClientRequestID = &val
} }
@ -429,7 +427,7 @@ func (client *serviceClient) listContainersSegmentHandleResponse(resp *http.Resp
result.Version = &val result.Version = &val
} }
if err := runtime.UnmarshalAsXML(resp, &result.ListContainersSegmentResponse); err != nil { if err := runtime.UnmarshalAsXML(resp, &result.ListContainersSegmentResponse); err != nil {
return serviceClientListContainersSegmentResponse{}, err return ServiceClientListContainersSegmentResponse{}, err
} }
return result, nil return result, nil
} }
@ -437,25 +435,26 @@ func (client *serviceClient) listContainersSegmentHandleResponse(resp *http.Resp
// SetProperties - Sets properties for a storage account's Blob service endpoint, including properties for Storage Analytics // SetProperties - Sets properties for a storage account's Blob service endpoint, including properties for Storage Analytics
// and CORS (Cross-Origin Resource Sharing) rules // and CORS (Cross-Origin Resource Sharing) rules
// If the operation fails it returns an *azcore.ResponseError type. // If the operation fails it returns an *azcore.ResponseError type.
// Generated from API version 2020-10-02
// storageServiceProperties - The StorageService properties. // storageServiceProperties - The StorageService properties.
// options - serviceClientSetPropertiesOptions contains the optional parameters for the serviceClient.SetProperties method. // options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method.
func (client *serviceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *serviceClientSetPropertiesOptions) (serviceClientSetPropertiesResponse, error) { func (client *ServiceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (ServiceClientSetPropertiesResponse, error) {
req, err := client.setPropertiesCreateRequest(ctx, storageServiceProperties, options) req, err := client.setPropertiesCreateRequest(ctx, storageServiceProperties, options)
if err != nil { if err != nil {
return serviceClientSetPropertiesResponse{}, err return ServiceClientSetPropertiesResponse{}, err
} }
resp, err := client.pl.Do(req) resp, err := client.pl.Do(req)
if err != nil { if err != nil {
return serviceClientSetPropertiesResponse{}, err return ServiceClientSetPropertiesResponse{}, err
} }
if !runtime.HasStatusCode(resp, http.StatusAccepted) { if !runtime.HasStatusCode(resp, http.StatusAccepted) {
return serviceClientSetPropertiesResponse{}, runtime.NewResponseError(resp) return ServiceClientSetPropertiesResponse{}, runtime.NewResponseError(resp)
} }
return client.setPropertiesHandleResponse(resp) return client.setPropertiesHandleResponse(resp)
} }
// setPropertiesCreateRequest creates the SetProperties request. // setPropertiesCreateRequest creates the SetProperties request.
func (client *serviceClient) setPropertiesCreateRequest(ctx context.Context, storageServiceProperties StorageServiceProperties, options *serviceClientSetPropertiesOptions) (*policy.Request, error) { func (client *ServiceClient) setPropertiesCreateRequest(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (*policy.Request, error) {
req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint)
if err != nil { if err != nil {
return nil, err return nil, err
@ -467,17 +466,17 @@ func (client *serviceClient) setPropertiesCreateRequest(ctx context.Context, sto
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
} }
req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("x-ms-version", "2020-10-02") req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
if options != nil && options.RequestID != nil { if options != nil && options.RequestID != nil {
req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
} }
req.Raw().Header.Set("Accept", "application/xml") req.Raw().Header["Accept"] = []string{"application/xml"}
return req, runtime.MarshalAsXML(req, storageServiceProperties) return req, runtime.MarshalAsXML(req, storageServiceProperties)
} }
// setPropertiesHandleResponse handles the SetProperties response. // setPropertiesHandleResponse handles the SetProperties response.
func (client *serviceClient) setPropertiesHandleResponse(resp *http.Response) (serviceClientSetPropertiesResponse, error) { func (client *ServiceClient) setPropertiesHandleResponse(resp *http.Response) (ServiceClientSetPropertiesResponse, error) {
result := serviceClientSetPropertiesResponse{RawResponse: resp} result := ServiceClientSetPropertiesResponse{}
if val := resp.Header.Get("x-ms-client-request-id"); val != "" { if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val result.ClientRequestID = &val
} }
@ -492,28 +491,29 @@ func (client *serviceClient) setPropertiesHandleResponse(resp *http.Response) (s
// SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. // SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request.
// If the operation fails it returns an *azcore.ResponseError type. // If the operation fails it returns an *azcore.ResponseError type.
// Generated from API version 2020-10-02
// contentLength - The length of the request. // contentLength - The length of the request.
// multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header // multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header
// value: multipart/mixed; boundary=batch_ // value: multipart/mixed; boundary=batch_
// body - Initial data // body - Initial data
// options - serviceClientSubmitBatchOptions contains the optional parameters for the serviceClient.SubmitBatch method. // options - ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method.
func (client *serviceClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *serviceClientSubmitBatchOptions) (serviceClientSubmitBatchResponse, error) { func (client *ServiceClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceClientSubmitBatchOptions) (ServiceClientSubmitBatchResponse, error) {
req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options)
if err != nil { if err != nil {
return serviceClientSubmitBatchResponse{}, err return ServiceClientSubmitBatchResponse{}, err
} }
resp, err := client.pl.Do(req) resp, err := client.pl.Do(req)
if err != nil { if err != nil {
return serviceClientSubmitBatchResponse{}, err return ServiceClientSubmitBatchResponse{}, err
} }
if !runtime.HasStatusCode(resp, http.StatusOK) { if !runtime.HasStatusCode(resp, http.StatusOK) {
return serviceClientSubmitBatchResponse{}, runtime.NewResponseError(resp) return ServiceClientSubmitBatchResponse{}, runtime.NewResponseError(resp)
} }
return client.submitBatchHandleResponse(resp) return client.submitBatchHandleResponse(resp)
} }
// submitBatchCreateRequest creates the SubmitBatch request. // submitBatchCreateRequest creates the SubmitBatch request.
func (client *serviceClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *serviceClientSubmitBatchOptions) (*policy.Request, error) { func (client *ServiceClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceClientSubmitBatchOptions) (*policy.Request, error) {
req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint)
if err != nil { if err != nil {
return nil, err return nil, err
@ -525,19 +525,19 @@ func (client *serviceClient) submitBatchCreateRequest(ctx context.Context, conte
} }
req.Raw().URL.RawQuery = reqQP.Encode() req.Raw().URL.RawQuery = reqQP.Encode()
runtime.SkipBodyDownload(req) runtime.SkipBodyDownload(req)
req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
req.Raw().Header.Set("Content-Type", multipartContentType) req.Raw().Header["Content-Type"] = []string{multipartContentType}
req.Raw().Header.Set("x-ms-version", "2020-10-02") req.Raw().Header["x-ms-version"] = []string{"2020-10-02"}
if options != nil && options.RequestID != nil { if options != nil && options.RequestID != nil {
req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
} }
req.Raw().Header.Set("Accept", "application/xml") req.Raw().Header["Accept"] = []string{"application/xml"}
return req, runtime.MarshalAsXML(req, body) return req, req.SetBody(body, "application/xml")
} }
// submitBatchHandleResponse handles the SubmitBatch response. // submitBatchHandleResponse handles the SubmitBatch response.
func (client *serviceClient) submitBatchHandleResponse(resp *http.Response) (serviceClientSubmitBatchResponse, error) { func (client *ServiceClient) submitBatchHandleResponse(resp *http.Response) (ServiceClientSubmitBatchResponse, error) {
result := serviceClientSubmitBatchResponse{RawResponse: resp} result := ServiceClientSubmitBatchResponse{Body: resp.Body}
if val := resp.Header.Get("Content-Type"); val != "" { if val := resp.Header.Get("Content-Type"); val != "" {
result.ContentType = &val result.ContentType = &val
} }

View file

@ -5,8 +5,9 @@
// Licensed under the MIT License. See License.txt in the project root for license information. // Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator. // Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated. // Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DO NOT EDIT.
package azblob package generated
import ( import (
"strings" "strings"

View file

@ -5,8 +5,9 @@
// Licensed under the MIT License. See License.txt in the project root for license information. // Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator. // Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated. // Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DO NOT EDIT.
package azblob package generated
import ( import (
"regexp" "regexp"

View file

@ -5,8 +5,9 @@
// Licensed under the MIT License. See License.txt in the project root for license information. // Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator. // Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated. // Changes may cause incorrect behavior and will be lost if the code is regenerated.
// DO NOT EDIT.
package azblob package generated
import ( import (
"encoding/xml" "encoding/xml"

View file

@ -0,0 +1,78 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package shared
import (
"context"
"errors"
)
// BatchTransferOptions identifies options used by DoBatchTransfer.
type BatchTransferOptions struct {
	TransferSize int64 // total number of bytes to transfer
	ChunkSize    int64 // size of each chunk; the final chunk may be smaller
	Concurrency  uint16 // number of parallel workers; 0 selects the default (5)
	// Operation transfers one chunk starting at offset with length chunkSize.
	Operation func(offset int64, chunkSize int64, ctx context.Context) error
	// OperationName labels the batch operation (not referenced by DoBatchTransfer itself;
	// presumably used by callers for logging — TODO confirm).
	OperationName string
}
// DoBatchTransfer helps to execute operations in a batch manner.
// Can be used by users to customize batch works (for other scenarios that the SDK does not provide).
// The transfer is split into ceil(TransferSize/ChunkSize) chunks which are processed by a
// pool of o.Concurrency worker goroutines; the first failing chunk cancels the remaining ones
// and its error is returned.
func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error {
	if o.ChunkSize == 0 {
		return errors.New("ChunkSize cannot be 0")
	}

	if o.Concurrency == 0 {
		o.Concurrency = 5 // default concurrency
	}

	// Every chunk covers ChunkSize bytes except possibly the last one.
	chunkCount := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1)

	jobs := make(chan func() error, o.Concurrency) // releases 'Concurrency' goroutines concurrently
	results := make(chan error, chunkCount)        // holds one response per chunk

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Worker pool: each goroutine drains the job channel until it is closed.
	for w := uint16(0); w < o.Concurrency; w++ {
		go func() {
			for job := range jobs {
				results <- job()
			}
		}()
	}

	// Enqueue one job per chunk.
	for idx := uint16(0); idx < chunkCount; idx++ {
		begin := int64(idx) * o.ChunkSize
		size := o.ChunkSize
		if idx == chunkCount-1 {
			// Last chunk: whatever remains after the full-size chunks.
			size = o.TransferSize - begin
		}
		jobs <- func() error {
			return o.Operation(begin, size, ctx)
		}
	}
	close(jobs)

	// Collect every response; remember only the first error (the original failure —
	// later chunks typically fail with a canceled context).
	var firstErr error
	for idx := uint16(0); idx < chunkCount; idx++ {
		if err := <-results; err != nil && firstErr == nil {
			cancel() // as soon as any operation fails, cancel all remaining calls
			firstErr = err
		}
	}
	return firstErr
}

View file

@ -2,9 +2,9 @@
// +build go1.18 // +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved. // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. // Licensed under the MIT License. See License.txt in the project root for license information.
package azblob package shared
import ( import (
"errors" "errors"
@ -12,7 +12,7 @@ import (
type bytesWriter []byte type bytesWriter []byte
func newBytesWriter(b []byte) bytesWriter { func NewBytesWriter(b []byte) bytesWriter {
return b return b
} }

View file

@ -0,0 +1,53 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package shared
import (
"errors"
"io"
)
type SectionWriter struct {
Count int64
Offset int64
Position int64
WriterAt io.WriterAt
}
func NewSectionWriter(c io.WriterAt, off int64, count int64) *SectionWriter {
return &SectionWriter{
Count: count,
Offset: off,
WriterAt: c,
}
}
func (c *SectionWriter) Write(p []byte) (int, error) {
remaining := c.Count - c.Position
if remaining <= 0 {
return 0, errors.New("end of section reached")
}
slice := p
if int64(len(slice)) > remaining {
slice = slice[:remaining]
}
n, err := c.WriterAt.WriteAt(slice, c.Offset+c.Position)
c.Position += int64(n)
if err != nil {
return n, err
}
if len(p) > n {
return n, errors.New("not enough space for all bytes")
}
return n, nil
}

View file

@ -0,0 +1,238 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package shared
import (
"errors"
"fmt"
"io"
"net"
"net/url"
"strconv"
"strings"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
const (
	// TokenScope is the Azure AD scope requested when authenticating against Azure Storage.
	TokenScope = "https://storage.azure.com/.default"
)

const (
	// Canonical HTTP header names used by the storage client.
	HeaderAuthorization     = "Authorization"
	HeaderXmsDate           = "x-ms-date"
	HeaderContentLength     = "Content-Length"
	HeaderContentEncoding   = "Content-Encoding"
	HeaderContentLanguage   = "Content-Language"
	HeaderContentType       = "Content-Type"
	HeaderContentMD5        = "Content-MD5"
	HeaderIfModifiedSince   = "If-Modified-Since"
	HeaderIfMatch           = "If-Match"
	HeaderIfNoneMatch       = "If-None-Match"
	HeaderIfUnmodifiedSince = "If-Unmodified-Since"
	HeaderRange             = "Range"
)
// CopyOptions returns a zero-value T if opts is nil.
// If opts is not nil, a shallow copy is made and its address returned, so the
// caller's struct is never aliased.
func CopyOptions[T any](opts *T) *T {
	if opts != nil {
		cp := *opts
		return &cp
	}
	return new(T)
}
// errConnectionString is returned for any connection string that cannot be
// split into key=value pairs.
var errConnectionString = errors.New("connection string is either blank or malformed. The expected connection string " +
	"should contain key value pairs separated by semicolons. For example 'DefaultEndpointsProtocol=https;AccountName=<accountName>;" +
	"AccountKey=<accountKey>;EndpointSuffix=core.windows.net'")

// ParsedConnectionString holds the parts extracted from a storage account
// connection string.
type ParsedConnectionString struct {
	ServiceURL  string // blob service endpoint; may embed a SAS query string
	AccountName string
	AccountKey  string
}

// ParseConnectionString parses an Azure Storage connection string into its
// service URL, account name and account key. When only a SharedAccessSignature
// is present, it is embedded in ServiceURL and the name/key fields stay empty.
// An explicit BlobEndpoint overrides the endpoint derived from
// DefaultEndpointsProtocol/EndpointSuffix.
func ParseConnectionString(connectionString string) (ParsedConnectionString, error) {
	const (
		defaultScheme = "https"
		defaultSuffix = "core.windows.net"
	)

	connStrMap := make(map[string]string)
	connectionString = strings.TrimRight(connectionString, ";")

	// strings.Split always yields at least one element, so there is no
	// empty-slice case to guard against; a pair without '=' is malformed.
	for _, pair := range strings.Split(connectionString, ";") {
		key, value, ok := strings.Cut(pair, "=")
		if !ok {
			return ParsedConnectionString{}, errConnectionString
		}
		connStrMap[key] = value
	}

	accountName, ok := connStrMap["AccountName"]
	if !ok {
		return ParsedConnectionString{}, errors.New("connection string missing AccountName")
	}

	accountKey, ok := connStrMap["AccountKey"]
	if !ok {
		// SAS-only connection strings carry the signature in the URL instead of a key.
		sharedAccessSignature, ok := connStrMap["SharedAccessSignature"]
		if !ok {
			return ParsedConnectionString{}, errors.New("connection string missing AccountKey and SharedAccessSignature")
		}
		return ParsedConnectionString{
			ServiceURL: fmt.Sprintf("%v://%v.blob.%v/?%v", defaultScheme, accountName, defaultSuffix, sharedAccessSignature),
		}, nil
	}

	protocol, ok := connStrMap["DefaultEndpointsProtocol"]
	if !ok {
		protocol = defaultScheme
	}

	suffix, ok := connStrMap["EndpointSuffix"]
	if !ok {
		suffix = defaultSuffix
	}

	if blobEndpoint, ok := connStrMap["BlobEndpoint"]; ok {
		return ParsedConnectionString{
			ServiceURL:  blobEndpoint,
			AccountName: accountName,
			AccountKey:  accountKey,
		}, nil
	}

	return ParsedConnectionString{
		ServiceURL:  fmt.Sprintf("%v://%v.blob.%v", protocol, accountName, suffix),
		AccountName: accountName,
		AccountKey:  accountKey,
	}, nil
}
// SerializeBlobTags converts tags to generated.BlobTags.
// A nil map yields nil; an empty map yields an empty (non-nil) tag set.
func SerializeBlobTags(tagsMap map[string]string) *generated.BlobTags {
	if tagsMap == nil {
		return nil
	}
	set := make([]*generated.BlobTag, 0, len(tagsMap))
	for k, v := range tagsMap {
		// Copy the loop variables: the tag stores pointers to them.
		k, v := k, v
		set = append(set, &generated.BlobTag{Key: &k, Value: &v})
	}
	return &generated.BlobTags{BlobTagSet: set}
}
// SerializeBlobTagsToStrPtr renders tags as a URL-encoded "k=v&k=v" query
// string. A nil map yields nil; an empty map yields a pointer to "".
func SerializeBlobTagsToStrPtr(tagsMap map[string]string) *string {
	if tagsMap == nil {
		return nil
	}
	pairs := make([]string, 0, len(tagsMap))
	for k, v := range tagsMap {
		pairs = append(pairs, url.QueryEscape(k)+"="+url.QueryEscape(v))
	}
	joined := strings.Join(pairs, "&")
	return &joined
}
func ValidateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) {
if body == nil { // nil body's are "logically" seekable to 0 and are 0 bytes long
return 0, nil
}
err := validateSeekableStreamAt0(body)
if err != nil {
return 0, err
}
count, err := body.Seek(0, io.SeekEnd)
if err != nil {
return 0, errors.New("body stream must be seekable")
}
_, err = body.Seek(0, io.SeekStart)
if err != nil {
return 0, err
}
return count, nil
}
// return an error if body is not a valid seekable stream at 0
func validateSeekableStreamAt0(body io.ReadSeeker) error {
if body == nil { // nil body's are "logically" seekable to 0
return nil
}
if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil {
// Help detect programmer error
if err != nil {
return errors.New("body stream must be seekable")
}
return errors.New("body stream must be set to position 0")
}
return nil
}
// RangeToString formats an HTTP Range header value for count bytes starting at
// offset, e.g. RangeToString(0, 10) == "bytes=0-9" (the end is inclusive).
func RangeToString(offset, count int64) string {
	start := strconv.FormatInt(offset, 10)
	end := strconv.FormatInt(offset+count-1, 10)
	return "bytes=" + start + "-" + end
}
type nopCloser struct {
io.ReadSeeker
}
func (n nopCloser) Close() error {
return nil
}
// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker.
func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {
return nopCloser{rs}
}
// GenerateLeaseID returns leaseID unchanged when it is non-nil; otherwise it
// generates a fresh UUID string to use as the lease ID.
func GenerateLeaseID(leaseID *string) (*string, error) {
	if leaseID != nil {
		return leaseID, nil
	}
	id, err := uuid.New()
	if err != nil {
		return nil, err
	}
	return to.Ptr(id.String()), nil
}
// GetClientOptions returns o itself when non-nil, or a freshly allocated
// zero-value T when o is nil. Unlike CopyOptions it never copies.
func GetClientOptions[T any](o *T) *T {
	if o != nil {
		return o
	}
	return new(T)
}
// IsIPEndpointStyle checks whether host is an IP, in which case the storage
// account endpoint is composed as:
// http(s)://IP(:port)/storageaccount/container/...
// As with url.URL's Host property, host may be either "host" or "host:port".
func IsIPEndpointStyle(host string) bool {
	if len(host) == 0 {
		return false
	}
	if bare, _, err := net.SplitHostPort(host); err == nil {
		host = bare
	}
	// SplitHostPort fails for an IPv6 literal without a port; strip the
	// RFC 2732 brackets so net.ParseIP can handle it.
	if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") {
		host = host[1 : len(host)-1]
	}
	return net.ParseIP(host) != nil
}

View file

@ -2,15 +2,17 @@
// +build go1.18 // +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved. // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. // Licensed under the MIT License. See License.txt in the project root for license information.
package azblob package shared
import ( import (
"fmt" "fmt"
"sync" "sync"
) )
const _1MiB = 1024 * 1024
// TransferManager provides a buffer and thread pool manager for certain transfer options. // TransferManager provides a buffer and thread pool manager for certain transfer options.
// It is undefined behavior if code outside this package call any of these methods. // It is undefined behavior if code outside this package call any of these methods.
type TransferManager interface { type TransferManager interface {

View file

@ -1,150 +0,0 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package internal
import (
"bytes"
"context"
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
"strconv"
"time"
)
// CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header.
type CtxWithHTTPHeaderKey struct{}

// CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions.
type CtxWithRetryOptionsKey struct{}

// nopCloser adds a no-op Close to an io.ReadSeeker.
type nopCloser struct {
	io.ReadSeeker
}

// Close is a no-op; it always returns nil.
func (n nopCloser) Close() error {
	return nil
}

// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker.
func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser {
	return nopCloser{rs}
}

// BodyDownloadPolicyOpValues is the struct containing the per-operation values
// consulted by the body-download policy.
type BodyDownloadPolicyOpValues struct {
	Skip bool
}
func NewResponseError(inner error, resp *http.Response) error {
return &ResponseError{inner: inner, resp: resp}
}
type ResponseError struct {
inner error
resp *http.Response
}
// Error implements the error interface for type ResponseError.
func (e *ResponseError) Error() string {
return e.inner.Error()
}
// Unwrap returns the inner error.
func (e *ResponseError) Unwrap() error {
return e.inner
}
// RawResponse returns the HTTP response associated with this error.
func (e *ResponseError) RawResponse() *http.Response {
return e.resp
}
// NonRetriable indicates this error is non-transient.
func (e *ResponseError) NonRetriable() {
// marker method
}
// Delay waits for the duration to elapse or the context to be cancelled.
func Delay(ctx context.Context, delay time.Duration) error {
select {
case <-time.After(delay):
return nil
case <-ctx.Done():
return ctx.Err()
}
}
// ErrNoBody is returned if the response didn't contain a body.
var ErrNoBody = errors.New("the response did not contain a body")
// GetJSON reads the response body into a raw JSON object.
// It returns ErrNoBody if there was no content.
func GetJSON(resp *http.Response) (map[string]interface{}, error) {
body, err := ioutil.ReadAll(resp.Body)
defer resp.Body.Close()
if err != nil {
return nil, err
}
if len(body) == 0 {
return nil, ErrNoBody
}
// put the body back so it's available to others
resp.Body = ioutil.NopCloser(bytes.NewReader(body))
// unmarshall the body to get the value
var jsonBody map[string]interface{}
if err = json.Unmarshal(body, &jsonBody); err != nil {
return nil, err
}
return jsonBody, nil
}
const HeaderRetryAfter = "Retry-After"
// RetryAfter returns non-zero if the response contains a Retry-After header value.
func RetryAfter(resp *http.Response) time.Duration {
if resp == nil {
return 0
}
ra := resp.Header.Get(HeaderRetryAfter)
if ra == "" {
return 0
}
// retry-after values are expressed in either number of
// seconds or an HTTP-date indicating when to try again
if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 {
return time.Duration(retryAfter) * time.Second
} else if t, err := time.Parse(time.RFC1123, ra); err == nil {
return time.Until(t)
}
return 0
}
// HasStatusCode returns true if the Response's status code is one of the specified values.
func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
if resp == nil {
return false
}
for _, sc := range statusCodes {
if resp.StatusCode == sc {
return true
}
}
return false
}
// defaultScope is the suffix appended to an endpoint to form its AAD scope.
const defaultScope = "/.default"

// EndpointToScope converts the provided URL endpoint to its default scope:
// a trailing slash is ensured, then "/.default" is appended (so the result
// always contains "//.default").
// The empty-string guard prevents the index panic the previous version had on
// an empty endpoint.
func EndpointToScope(endpoint string) string {
	if len(endpoint) == 0 || endpoint[len(endpoint)-1] != '/' {
		endpoint += "/"
	}
	return endpoint + defaultScope
}

View file

@ -0,0 +1,76 @@
# Guide to migrate from `azure-storage-blob-go` to `azblob`
This guide is intended to assist in the migration from the `azure-storage-blob-go` module, or previous betas of `azblob`, to the latest releases of the `azblob` module.
## Simplified API surface area
The redesign of the `azblob` module separates clients into various sub-packages.
In previous versions, the public surface area was "flat", so all clients and supporting types were in the `azblob` package.
This made it difficult to navigate the public surface area.
## Clients
In `azure-storage-blob-go` a client constructor always requires a `url.URL` and `Pipeline` parameters.
In `azblob` a client constructor always requires a `string` URL, any specified credential type, and a `*ClientOptions` for optional values. You pass `nil` to accept default options.
```go
// new code
client, err := azblob.NewClient("<my storage account URL>", cred, nil)
```
## Authentication
In `azure-storage-blob-go` you created a `Pipeline` with the required credential type. This pipeline was then passed to the client constructor.
In `azblob`, you pass the required credential directly to the client constructor.
```go
// new code. cred is an AAD token credential created from the azidentity module
client, err := azblob.NewClient("<my storage account URL>", cred, nil)
```
The `azure-storage-blob-go` module provided limited support for OAuth token authentication via `NewTokenCredential`.
This has been replaced by using Azure Identity credentials from [azidentity](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#section-readme).
Authentication with a shared key via `NewSharedKeyCredential` remains unchanged.
In `azure-storage-blob-go` you created a `Pipeline` with `NewAnonymousCredential` to support anonymous or SAS authentication.
In `azblob` you use the constructor `NewClientWithNoCredential()` instead.
```go
// new code
client, err := azblob.NewClientWithNoCredential("<public blob or blob with SAS URL>", nil)
```
## Listing blobs/containers
In `azure-storage-blob-go` you explicitly created a `Marker` type that was used to page over results ([example](https://pkg.go.dev/github.com/Azure/azure-storage-blob-go/azblob?utm_source=godoc#example-package)).
In `azblob`, operations that return paginated values return a `*runtime.Pager[T]`.
```go
// new code
pager := client.NewListBlobsFlatPager("my-container", nil)
for pager.More() {
page, err := pager.NextPage(context.TODO())
// process results
}
```
## Configuring the HTTP pipeline
In `azure-storage-blob-go` you explicitly created a HTTP pipeline with configuration before creating a client.
This pipeline instance was then passed as an argument to the client constructor ([example](https://pkg.go.dev/github.com/Azure/azure-storage-blob-go/azblob?utm_source=godoc#example-NewPipeline)).
In `azblob` a HTTP pipeline is created during client construction. The pipeline is configured through the `azcore.ClientOptions` type.
```go
// new code
client, err := azblob.NewClient(account, cred, &azblob.ClientOptions{
ClientOptions: azcore.ClientOptions{
// configure HTTP pipeline options here
},
})
```

View file

@ -0,0 +1,70 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package azblob
import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
)
// CreateContainerOptions contains the optional parameters for the service.Client.CreateContainer method.
type CreateContainerOptions = service.CreateContainerOptions

// DeleteContainerOptions contains the optional parameters for the container.Client.Delete method.
type DeleteContainerOptions = service.DeleteContainerOptions

// DeleteBlobOptions contains the optional parameters for the Client.Delete method.
type DeleteBlobOptions = blob.DeleteOptions

// DownloadStreamOptions contains the optional parameters for the Client.DownloadStream method.
type DownloadStreamOptions = blob.DownloadStreamOptions

// ListBlobsFlatOptions contains the optional parameters for the container.Client.ListBlobFlatSegment method.
type ListBlobsFlatOptions = container.ListBlobsFlatOptions

// ListBlobsInclude indicates what additional information the service should return with each blob.
type ListBlobsInclude = container.ListBlobsInclude

// ListContainersOptions contains the optional parameters for the container.Client.ListContainers operation.
type ListContainersOptions = service.ListContainersOptions

// UploadBufferOptions provides the set of configurations for the UploadBuffer operation.
type UploadBufferOptions = blockblob.UploadBufferOptions

// UploadFileOptions provides the set of configurations for the UploadFile operation.
type UploadFileOptions = blockblob.UploadFileOptions

// UploadStreamOptions provides the set of configurations for the UploadStream operation.
type UploadStreamOptions = blockblob.UploadStreamOptions

// DownloadBufferOptions identifies options used by the DownloadBuffer and DownloadFile functions.
type DownloadBufferOptions = blob.DownloadBufferOptions

// DownloadFileOptions identifies options used by the DownloadBuffer and DownloadFile functions.
type DownloadFileOptions = blob.DownloadFileOptions

// CpkInfo contains a group of parameters for client provided encryption key.
type CpkInfo = generated.CpkInfo

// CpkScopeInfo contains a group of parameters for the ContainerClient.Create method.
type CpkScopeInfo = generated.ContainerCpkScopeInfo

// AccessConditions identifies blob-specific access conditions which you optionally set.
type AccessConditions = exported.BlobAccessConditions

// ListContainersInclude indicates what additional information the service should return with each container.
type ListContainersInclude = service.ListContainersInclude

// ObjectReplicationPolicy are deserialized attributes.
type ObjectReplicationPolicy = blob.ObjectReplicationPolicy

// RetryReaderOptions contains properties which can help to decide when to do retry.
type RetryReaderOptions = blob.RetryReaderOptions

View file

@ -0,0 +1,393 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package pageblob
import (
"context"
"io"
"net/http"
"net/url"
"os"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
)
// ClientOptions contains the optional parameters when creating a Client.
type ClientOptions struct {
	azcore.ClientOptions
}

// Client represents a client to an Azure Storage page blob. It is a composite
// of the generated blob client and the generated page-blob client.
type Client base.CompositeClient[generated.BlobClient, generated.PageBlobClient]
// NewClient creates a Client object using the specified URL, Azure AD credential, and options.
// Example of serviceURL: https://<your_storage_account>.blob.core.windows.net
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
	// Bearer-token authentication runs as a per-retry pipeline policy.
	authPolicy := runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil)
	conOptions := shared.GetClientOptions(options) // nil options -> zero-value defaults
	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)

	return (*Client)(base.NewPageBlobClient(blobURL, pl, nil)), nil
}

// NewClientWithNoCredential creates a Client object using the specified URL and options.
// Intended for anonymous access or URLs that already carry a SAS token.
// Example of serviceURL: https://<your_storage_account>.blob.core.windows.net?<SAS token>
func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) {
	conOptions := shared.GetClientOptions(options)
	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)

	return (*Client)(base.NewPageBlobClient(blobURL, pl, nil)), nil
}

// NewClientWithSharedKeyCredential creates a Client object using the specified URL, shared key, and options.
// Example of serviceURL: https://<your_storage_account>.blob.core.windows.net
func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) {
	authPolicy := exported.NewSharedKeyCredPolicy(cred)
	conOptions := shared.GetClientOptions(options)
	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
	pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions)

	// The credential is retained in the composite so sharedKey() can recover it.
	return (*Client)(base.NewPageBlobClient(blobURL, pl, cred)), nil
}

// NewClientFromConnectionString creates a Client from the given connection string,
// container name, and blob name, dispatching to shared-key or credential-free
// construction depending on what the connection string contains.
func NewClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*Client, error) {
	parsed, err := shared.ParseConnectionString(connectionString)
	if err != nil {
		return nil, err
	}
	parsed.ServiceURL = runtime.JoinPaths(parsed.ServiceURL, containerName, blobName)

	if parsed.AccountKey != "" && parsed.AccountName != "" {
		credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey)
		if err != nil {
			return nil, err
		}
		return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options)
	}

	// SAS-only connection strings embed the signature in the URL.
	return NewClientWithNoCredential(parsed.ServiceURL, options)
}
// generated returns the page-blob half of the underlying composite client.
func (pb *Client) generated() *generated.PageBlobClient {
	_, pageBlob := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.PageBlobClient])(pb))
	return pageBlob
}

// URL returns the URL endpoint used by the Client object.
func (pb *Client) URL() string {
	return pb.generated().Endpoint()
}

// BlobClient returns the embedded blob client for this page blob client.
func (pb *Client) BlobClient() *blob.Client {
	innerBlob, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.PageBlobClient])(pb))
	return (*blob.Client)(innerBlob)
}

// sharedKey extracts the SharedKeyCredential stored in the composite client.
func (pb *Client) sharedKey() *blob.SharedKeyCredential {
	return base.SharedKeyComposite((*base.CompositeClient[generated.BlobClient, generated.PageBlobClient])(pb))
}
// WithSnapshot creates a new Client object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot, returning a URL to the base blob.
func (pb *Client) WithSnapshot(snapshot string) (*Client, error) {
	p, err := blob.ParseURL(pb.URL())
	if err != nil {
		return nil, err
	}
	p.Snapshot = snapshot

	// Rebuild a client on the modified URL, reusing the pipeline and credential.
	return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().Pipeline(), pb.sharedKey())), nil
}

// WithVersionID creates a new Client object identical to the source but with the specified version ID.
// Pass "" to remove the version, returning a URL to the base blob.
func (pb *Client) WithVersionID(versionID string) (*Client, error) {
	p, err := blob.ParseURL(pb.URL())
	if err != nil {
		return nil, err
	}
	p.VersionID = versionID

	// Rebuild a client on the modified URL, reusing the pipeline and credential.
	return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().Pipeline(), pb.sharedKey())), nil
}
// Create creates a page blob of the specified length. Call PutPage to upload data to a page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
// NOTE(review): o.format() is invoked even when o is nil — presumably format handles a nil
// receiver; confirm against the options type.
func (pb *Client) Create(ctx context.Context, size int64, o *CreateOptions) (CreateResponse, error) {
	createOptions, HTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := o.format()

	resp, err := pb.generated().Create(ctx, 0, size, createOptions, HTTPHeaders,
		leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
	return resp, err
}
// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
// It returns an error if the stream is not seekable or not positioned at 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
func (pb *Client) UploadPages(ctx context.Context, body io.ReadSeekCloser, options *UploadPagesOptions) (UploadPagesResponse, error) {
	// Determine the byte count and verify the stream is positioned at 0.
	count, err := shared.ValidateSeekableStreamAt0AndGetCount(body)
	if err != nil {
		return UploadPagesResponse{}, err
	}

	uploadPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format()

	resp, err := pb.generated().UploadPages(ctx, count, body, uploadPagesOptions, leaseAccessConditions,
		cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions)
	return resp, err
}
// UploadPagesFromURL copies 1 or more pages from a source URL to the page blob.
// The sourceOffset specifies the start offset of source data to copy from.
// The destOffset specifies the start offset of data in page blob will be written to.
// The count must be a multiple of 512 bytes.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url.
func (pb *Client) UploadPagesFromURL(ctx context.Context, source string, sourceOffset, destOffset, count int64,
	o *UploadPagesFromURLOptions) (UploadPagesFromURLResponse, error) {
	uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions,
		modifiedAccessConditions, sourceModifiedAccessConditions := o.format()

	// Both ranges span the same count of bytes, formatted as HTTP Range values.
	resp, err := pb.generated().UploadPagesFromURL(ctx, source, shared.RangeToString(sourceOffset, count), 0,
		shared.RangeToString(destOffset, count), uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions,
		sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions)
	return resp, err
}
// ClearPages frees the specified range of pages from the page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
func (pb *Client) ClearPages(ctx context.Context, rnge blob.HTTPRange, options *ClearPagesOptions) (ClearPagesResponse, error) {
	lac, cpk, cpkScope, snac, mac := options.format()
	opts := &generated.PageBlobClientClearPagesOptions{
		Range: exported.FormatHTTPRange(rnge),
	}
	// The 0 is the contentLength argument forwarded to the generated ClearPages call.
	return pb.generated().ClearPages(ctx, 0, opts, lac, cpk, cpkScope, snac, mac)
}
// NewGetPageRangesPager returns a pager over the valid page ranges of a page blob
// or of one of its snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
func (pb *Client) NewGetPageRangesPager(o *GetPageRangesOptions) *runtime.Pager[GetPageRangesResponse] {
	opts, lac, mac := o.format()
	return runtime.NewPager(runtime.PagingHandler[GetPageRangesResponse]{
		More: func(page GetPageRangesResponse) bool {
			// Keep paging while the service reports a non-empty continuation marker.
			return page.NextMarker != nil && len(*page.NextMarker) > 0
		},
		Fetcher: func(ctx context.Context, page *GetPageRangesResponse) (GetPageRangesResponse, error) {
			// On continuation requests, carry the previous page's marker forward.
			if page != nil {
				opts.Marker = page.NextMarker
			}
			req, err := pb.generated().GetPageRangesCreateRequest(ctx, opts, lac, mac)
			if err != nil {
				return GetPageRangesResponse{}, err
			}
			resp, err := pb.generated().Pipeline().Do(req)
			if err != nil {
				return GetPageRangesResponse{}, err
			}
			if !runtime.HasStatusCode(resp, http.StatusOK) {
				return GetPageRangesResponse{}, runtime.NewResponseError(resp)
			}
			return pb.generated().GetPageRangesHandleResponse(resp)
		},
	})
}
// NewGetPageRangesDiffPager returns a pager over the page ranges that differ between
// a specified snapshot and this page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
func (pb *Client) NewGetPageRangesDiffPager(o *GetPageRangesDiffOptions) *runtime.Pager[GetPageRangesDiffResponse] {
	opts, lac, mac := o.format()
	return runtime.NewPager(runtime.PagingHandler[GetPageRangesDiffResponse]{
		More: func(page GetPageRangesDiffResponse) bool {
			// Keep paging while the service reports a non-empty continuation marker.
			return page.NextMarker != nil && len(*page.NextMarker) > 0
		},
		Fetcher: func(ctx context.Context, page *GetPageRangesDiffResponse) (GetPageRangesDiffResponse, error) {
			// On continuation requests, carry the previous page's marker forward.
			if page != nil {
				opts.Marker = page.NextMarker
			}
			req, err := pb.generated().GetPageRangesDiffCreateRequest(ctx, opts, lac, mac)
			if err != nil {
				return GetPageRangesDiffResponse{}, err
			}
			resp, err := pb.generated().Pipeline().Do(req)
			if err != nil {
				return GetPageRangesDiffResponse{}, err
			}
			if !runtime.HasStatusCode(resp, http.StatusOK) {
				return GetPageRangesDiffResponse{}, runtime.NewResponseError(resp)
			}
			return pb.generated().GetPageRangesDiffHandleResponse(resp)
		},
	})
}
// Resize changes the page blob's total size, which must be a multiple of 512 bytes.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (pb *Client) Resize(ctx context.Context, size int64, options *ResizeOptions) (ResizeResponse, error) {
	opts, lac, cpk, cpkScope, mac := options.format()
	return pb.generated().Resize(ctx, size, opts, lac, cpk, cpkScope, mac)
}
// UpdateSequenceNumber sets the page blob's sequence number according to the
// action and value carried in options.
func (pb *Client) UpdateSequenceNumber(ctx context.Context, options *UpdateSequenceNumberOptions) (UpdateSequenceNumberResponse, error) {
	actionType, opts, lac, mac := options.format()
	return pb.generated().UpdateSequenceNumber(ctx, *actionType, opts, lac, mac)
}
// StartCopyIncremental begins an incremental copy from a snapshot of a page blob
// into this page blob: only the differences since the previously copied snapshot
// are transferred. The resulting copies are complete snapshots that can be read or
// copied from as usual.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and
// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots.
func (pb *Client) StartCopyIncremental(ctx context.Context, copySource string, prevSnapshot string, options *CopyIncrementalOptions) (CopyIncrementalResponse, error) {
	srcURL, err := url.Parse(copySource)
	if err != nil {
		return CopyIncrementalResponse{}, err
	}
	// Attach the previous snapshot as the "snapshot" query parameter of the source URL.
	q := srcURL.Query()
	q.Set("snapshot", prevSnapshot)
	srcURL.RawQuery = q.Encode()
	opts, mac := options.format()
	return pb.generated().CopyIncremental(ctx, srcURL.String(), opts, mac)
}
// Redeclared APIs
// Delete marks the blob (or a snapshot) for deletion; the actual removal happens
// later during garbage collection. Deleting a blob also deletes all of its snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
func (pb *Client) Delete(ctx context.Context, o *blob.DeleteOptions) (blob.DeleteResponse, error) {
	bc := pb.BlobClient()
	return bc.Delete(ctx, o)
}
// Undelete restores a soft-deleted blob's contents and metadata, together with any
// associated soft-deleted snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob.
func (pb *Client) Undelete(ctx context.Context, o *blob.UndeleteOptions) (blob.UndeleteResponse, error) {
	bc := pb.BlobClient()
	return bc.Undelete(ctx, o)
}
// SetTier sets the tier on a blob. It is allowed on a page blob in a premium storage
// account and on a block blob in a blob storage account (locally redundant storage
// only). A premium page blob's tier determines the allowed size, IOPS and bandwidth;
// a block blob's tier determines the Hot/Cool/Archive storage type. The operation
// does not update the blob's ETag.
// For detailed information about block blob level tier-ing see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
func (pb *Client) SetTier(ctx context.Context, tier blob.AccessTier, o *blob.SetTierOptions) (blob.SetTierResponse, error) {
	bc := pb.BlobClient()
	return bc.SetTier(ctx, tier, o)
}
// GetProperties fetches the blob's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
func (pb *Client) GetProperties(ctx context.Context, o *blob.GetPropertiesOptions) (blob.GetPropertiesResponse, error) {
	bc := pb.BlobClient()
	return bc.GetProperties(ctx, o)
}
// SetHTTPHeaders replaces the blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (pb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) {
	bc := pb.BlobClient()
	return bc.SetHTTPHeaders(ctx, HTTPHeaders, o)
}
// SetMetadata replaces the blob's metadata with the given name/value pairs.
// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
func (pb *Client) SetMetadata(ctx context.Context, metadata map[string]string, o *blob.SetMetadataOptions) (blob.SetMetadataResponse, error) {
	bc := pb.BlobClient()
	return bc.SetMetadata(ctx, metadata, o)
}
// CreateSnapshot takes a read-only snapshot of the blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob.
func (pb *Client) CreateSnapshot(ctx context.Context, o *blob.CreateSnapshotOptions) (blob.CreateSnapshotResponse, error) {
	bc := pb.BlobClient()
	return bc.CreateSnapshot(ctx, o)
}
// StartCopyFromURL starts copying the data at the source URL to this blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
func (pb *Client) StartCopyFromURL(ctx context.Context, copySource string, o *blob.StartCopyFromURLOptions) (blob.StartCopyFromURLResponse, error) {
	bc := pb.BlobClient()
	return bc.StartCopyFromURL(ctx, copySource, o)
}
// AbortCopyFromURL cancels a previously started pending copy, leaving the
// destination blob with zero length and its metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob.
func (pb *Client) AbortCopyFromURL(ctx context.Context, copyID string, o *blob.AbortCopyFromURLOptions) (blob.AbortCopyFromURLResponse, error) {
	bc := pb.BlobClient()
	return bc.AbortCopyFromURL(ctx, copyID, o)
}
// SetTags sets tags on a blob or on a specific blob version (but not on a snapshot).
// Every call replaces all tags currently attached to the blob; calling it with no
// tags set removes all of them.
// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags
func (pb *Client) SetTags(ctx context.Context, tags map[string]string, o *blob.SetTagsOptions) (blob.SetTagsResponse, error) {
	bc := pb.BlobClient()
	return bc.SetTags(ctx, tags, o)
}
// GetTags retrieves the tags of a blob, a specific blob version, or a snapshot.
// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags
func (pb *Client) GetTags(ctx context.Context, o *blob.GetTagsOptions) (blob.GetTagsResponse, error) {
	bc := pb.BlobClient()
	return bc.GetTags(ctx, o)
}
// CopyFromURL synchronously copies the data at the source URL into a block blob,
// for sizes up to 256 MB.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url.
func (pb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.CopyFromURLOptions) (blob.CopyFromURLResponse, error) {
	bc := pb.BlobClient()
	return bc.CopyFromURL(ctx, copySource, o)
}
// Concurrent Download Functions -----------------------------------------------------------------------------------------
// DownloadStream reads a range of bytes from a blob. The response also carries the
// blob's properties and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
func (pb *Client) DownloadStream(ctx context.Context, o *blob.DownloadStreamOptions) (blob.DownloadStreamResponse, error) {
	bc := pb.BlobClient()
	return bc.DownloadStream(ctx, o)
}
// DownloadBuffer downloads an Azure blob into the provided buffer using parallel
// range downloads, returning the number of bytes written.
func (pb *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *blob.DownloadBufferOptions) (int64, error) {
	// Wrap the caller's byte slice in a WriterAt so ranges can land concurrently.
	w := shared.NewBytesWriter(buffer)
	return pb.BlobClient().DownloadBuffer(ctx, w, o)
}
// DownloadFile downloads an Azure blob into a local file, returning the number of
// bytes written. The file is truncated if its size does not match.
func (pb *Client) DownloadFile(ctx context.Context, file *os.File, o *blob.DownloadFileOptions) (int64, error) {
	bc := pb.BlobClient()
	return bc.DownloadFile(ctx, file, o)
}

View file

@ -0,0 +1,65 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package pageblob
import "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
const (
	// PageBytes indicates the number of bytes in a page (512).
	PageBytes = 512
)

// CopyStatusType defines values for CopyStatusType
type CopyStatusType = generated.CopyStatusType

// Each value below aliases the constant of the same name in the generated package.
const (
	CopyStatusTypePending CopyStatusType = generated.CopyStatusTypePending
	CopyStatusTypeSuccess CopyStatusType = generated.CopyStatusTypeSuccess
	CopyStatusTypeAborted CopyStatusType = generated.CopyStatusTypeAborted
	CopyStatusTypeFailed  CopyStatusType = generated.CopyStatusTypeFailed
)

// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type.
// It delegates to the generated package's list.
func PossibleCopyStatusTypeValues() []CopyStatusType {
	return generated.PossibleCopyStatusTypeValues()
}
// PremiumPageBlobAccessTier defines values for Premium PageBlob's AccessTier
type PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTier

// Each value below aliases the constant of the same name in the generated package.
const (
	PremiumPageBlobAccessTierP10 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP10
	PremiumPageBlobAccessTierP15 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP15
	PremiumPageBlobAccessTierP20 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP20
	PremiumPageBlobAccessTierP30 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP30
	PremiumPageBlobAccessTierP4  PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP4
	PremiumPageBlobAccessTierP40 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP40
	PremiumPageBlobAccessTierP50 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP50
	PremiumPageBlobAccessTierP6  PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP6
	PremiumPageBlobAccessTierP60 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP60
	PremiumPageBlobAccessTierP70 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP70
	PremiumPageBlobAccessTierP80 PremiumPageBlobAccessTier = generated.PremiumPageBlobAccessTierP80
)

// PossiblePremiumPageBlobAccessTierValues returns the possible values for the PremiumPageBlobAccessTier const type.
// It delegates to the generated package's list.
func PossiblePremiumPageBlobAccessTierValues() []PremiumPageBlobAccessTier {
	return generated.PossiblePremiumPageBlobAccessTierValues()
}
// SequenceNumberActionType defines values for SequenceNumberActionType
type SequenceNumberActionType = generated.SequenceNumberActionType

// Each value below aliases the constant of the same name in the generated package.
const (
	SequenceNumberActionTypeMax       SequenceNumberActionType = generated.SequenceNumberActionTypeMax
	SequenceNumberActionTypeUpdate    SequenceNumberActionType = generated.SequenceNumberActionTypeUpdate
	SequenceNumberActionTypeIncrement SequenceNumberActionType = generated.SequenceNumberActionTypeIncrement
)

// PossibleSequenceNumberActionTypeValues returns the possible values for the SequenceNumberActionType const type.
// It delegates to the generated package's list.
func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType {
	return generated.PossibleSequenceNumberActionTypeValues()
}

View file

@ -2,126 +2,123 @@
// +build go1.18 // +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved. // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. // Licensed under the MIT License. See License.txt in the project root for license information.
package azblob package pageblob
import ( import (
"strconv"
"time" "time"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
) )
// --------------------------------------------------------------------------------------------------------------------- // Type Declarations ---------------------------------------------------------------------
func rangeToString(offset, count int64) string { // PageList - the list of pages
return "bytes=" + strconv.FormatInt(offset, 10) + "-" + strconv.FormatInt(offset+count-1, 10) type PageList = generated.PageList
}
// --------------------------------------------------------------------------------------------------------------------- // ClearRange defines a range of pages.
type ClearRange = generated.ClearRange
// PageBlobCreateOptions provides set of configurations for CreatePageBlob operation // PageRange defines a range of pages.
type PageBlobCreateOptions struct { type PageRange = generated.PageRange
// SequenceNumberAccessConditions contains a group of parameters for the Client.UploadPages method.
type SequenceNumberAccessConditions = generated.SequenceNumberAccessConditions
// Request Model Declaration -------------------------------------------------------------------------------------------
// CreateOptions contains the optional parameters for the Client.Create method.
type CreateOptions struct {
// Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of
// the sequence number must be between 0 and 2^63 - 1. // the sequence number must be between 0 and 2^63 - 1.
BlobSequenceNumber *int64 SequenceNumber *int64
// Optional. Used to set blob tags in various blob operations. // Optional. Used to set blob tags in various blob operations.
BlobTagsMap map[string]string Tags map[string]string
// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
// operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs
// are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source
// blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers.
// See Naming and Referencing Containers, Blobs, and Metadata for more information. // See Naming and Referencing Containers, Blobs, and Metadata for more information.
Metadata map[string]string Metadata map[string]string
// Optional. Indicates the tier to be set on the page blob. // Optional. Indicates the tier to be set on the page blob.
Tier *PremiumPageBlobAccessTier Tier *PremiumPageBlobAccessTier
HTTPHeaders *BlobHTTPHeaders HTTPHeaders *blob.HTTPHeaders
CpkInfo *CpkInfo CpkInfo *blob.CpkInfo
CpkScopeInfo *CpkScopeInfo CpkScopeInfo *blob.CpkScopeInfo
BlobAccessConditions *BlobAccessConditions AccessConditions *blob.AccessConditions
// Specifies the date time when the blobs immutability policy is set to expire. // Specifies the date time when the blobs immutability policy is set to expire.
ImmutabilityPolicyExpiry *time.Time ImmutabilityPolicyExpiry *time.Time
// Specifies the immutability policy mode to set on the blob. // Specifies the immutability policy mode to set on the blob.
ImmutabilityPolicyMode *BlobImmutabilityPolicyMode ImmutabilityPolicyMode *blob.ImmutabilityPolicyMode
// Specified if a legal hold should be set on the blob. // Specified if a legal hold should be set on the blob.
LegalHold *bool LegalHold *bool
} }
func (o *PageBlobCreateOptions) format() (*pageBlobClientCreateOptions, *BlobHTTPHeaders, *LeaseAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) { func (o *CreateOptions) format() (*generated.PageBlobClientCreateOptions, *generated.BlobHTTPHeaders,
*generated.LeaseAccessConditions, *generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) {
if o == nil { if o == nil {
return nil, nil, nil, nil, nil, nil return nil, nil, nil, nil, nil, nil
} }
options := &pageBlobClientCreateOptions{ options := &generated.PageBlobClientCreateOptions{
BlobSequenceNumber: o.BlobSequenceNumber, BlobSequenceNumber: o.SequenceNumber,
BlobTagsString: serializeBlobTagsToStrPtr(o.BlobTagsMap), BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags),
Metadata: o.Metadata, Metadata: o.Metadata,
Tier: o.Tier, Tier: o.Tier,
} }
leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
return options, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions return options, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions
} }
// PageBlobCreateResponse contains the response from method PageBlobClient.Create.
type PageBlobCreateResponse struct {
pageBlobClientCreateResponse
}
func toPageBlobCreateResponse(resp pageBlobClientCreateResponse) PageBlobCreateResponse {
return PageBlobCreateResponse{resp}
}
// --------------------------------------------------------------------------------------------------------------------- // ---------------------------------------------------------------------------------------------------------------------
// PageBlobUploadPagesOptions provides set of configurations for UploadPages operation // UploadPagesOptions contains the optional parameters for the Client.UploadPages method.
type PageBlobUploadPagesOptions struct { type UploadPagesOptions struct {
// Specify the transactional crc64 for the body, to be validated by the service. // Range specifies a range of bytes. The default value is all bytes.
PageRange *HttpRange Range blob.HTTPRange
TransactionalContentCRC64 []byte TransactionalContentCRC64 []byte
// Specify the transactional md5 for the body, to be validated by the service. // Specify the transactional md5 for the body, to be validated by the service.
TransactionalContentMD5 []byte TransactionalContentMD5 []byte
CpkInfo *CpkInfo CpkInfo *blob.CpkInfo
CpkScopeInfo *CpkScopeInfo CpkScopeInfo *blob.CpkScopeInfo
SequenceNumberAccessConditions *SequenceNumberAccessConditions SequenceNumberAccessConditions *SequenceNumberAccessConditions
BlobAccessConditions *BlobAccessConditions AccessConditions *blob.AccessConditions
} }
func (o *PageBlobUploadPagesOptions) format() (*pageBlobClientUploadPagesOptions, *LeaseAccessConditions, func (o *UploadPagesOptions) format() (*generated.PageBlobClientUploadPagesOptions, *generated.LeaseAccessConditions,
*CpkInfo, *CpkScopeInfo, *SequenceNumberAccessConditions, *ModifiedAccessConditions) { *generated.CpkInfo, *generated.CpkScopeInfo, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions) {
if o == nil { if o == nil {
return nil, nil, nil, nil, nil, nil return nil, nil, nil, nil, nil, nil
} }
options := &pageBlobClientUploadPagesOptions{ options := &generated.PageBlobClientUploadPagesOptions{
TransactionalContentCRC64: o.TransactionalContentCRC64, TransactionalContentCRC64: o.TransactionalContentCRC64,
TransactionalContentMD5: o.TransactionalContentMD5, TransactionalContentMD5: o.TransactionalContentMD5,
Range: exported.FormatHTTPRange(o.Range),
} }
if o.PageRange != nil { leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
options.Range = o.PageRange.format()
}
leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
return options, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions return options, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions
} }
// PageBlobUploadPagesResponse contains the response from method PageBlobClient.UploadPages.
type PageBlobUploadPagesResponse struct {
pageBlobClientUploadPagesResponse
}
func toPageBlobUploadPagesResponse(resp pageBlobClientUploadPagesResponse) PageBlobUploadPagesResponse {
return PageBlobUploadPagesResponse{resp}
}
// --------------------------------------------------------------------------------------------------------------------- // ---------------------------------------------------------------------------------------------------------------------
// PageBlobUploadPagesFromURLOptions provides set of configurations for UploadPagesFromURL operation // UploadPagesFromURLOptions contains the optional parameters for the Client.UploadPagesFromURL method.
type PageBlobUploadPagesFromURLOptions struct { type UploadPagesFromURLOptions struct {
// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
CopySourceAuthorization *string CopySourceAuthorization *string
// Specify the md5 calculated for the range of bytes that must be read from the copy source. // Specify the md5 calculated for the range of bytes that must be read from the copy source.
@ -129,75 +126,57 @@ type PageBlobUploadPagesFromURLOptions struct {
// Specify the crc64 calculated for the range of bytes that must be read from the copy source. // Specify the crc64 calculated for the range of bytes that must be read from the copy source.
SourceContentCRC64 []byte SourceContentCRC64 []byte
CpkInfo *CpkInfo CpkInfo *blob.CpkInfo
CpkScopeInfo *CpkScopeInfo CpkScopeInfo *blob.CpkScopeInfo
SequenceNumberAccessConditions *SequenceNumberAccessConditions SequenceNumberAccessConditions *SequenceNumberAccessConditions
SourceModifiedAccessConditions *SourceModifiedAccessConditions SourceModifiedAccessConditions *blob.SourceModifiedAccessConditions
BlobAccessConditions *BlobAccessConditions AccessConditions *blob.AccessConditions
} }
func (o *PageBlobUploadPagesFromURLOptions) format() (*pageBlobClientUploadPagesFromURLOptions, *CpkInfo, *CpkScopeInfo, func (o *UploadPagesFromURLOptions) format() (*generated.PageBlobClientUploadPagesFromURLOptions, *generated.CpkInfo, *generated.CpkScopeInfo,
*LeaseAccessConditions, *SequenceNumberAccessConditions, *ModifiedAccessConditions, *SourceModifiedAccessConditions) { *generated.LeaseAccessConditions, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions, *generated.SourceModifiedAccessConditions) {
if o == nil { if o == nil {
return nil, nil, nil, nil, nil, nil, nil return nil, nil, nil, nil, nil, nil, nil
} }
options := &pageBlobClientUploadPagesFromURLOptions{ options := &generated.PageBlobClientUploadPagesFromURLOptions{
SourceContentMD5: o.SourceContentMD5, SourceContentMD5: o.SourceContentMD5,
SourceContentcrc64: o.SourceContentCRC64, SourceContentcrc64: o.SourceContentCRC64,
CopySourceAuthorization: o.CopySourceAuthorization, CopySourceAuthorization: o.CopySourceAuthorization,
} }
leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
return options, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, o.SequenceNumberAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions return options, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, o.SequenceNumberAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions
} }
// PageBlobUploadPagesFromURLResponse contains the response from method PageBlobClient.UploadPagesFromURL
type PageBlobUploadPagesFromURLResponse struct {
pageBlobClientUploadPagesFromURLResponse
}
func toPageBlobUploadPagesFromURLResponse(resp pageBlobClientUploadPagesFromURLResponse) PageBlobUploadPagesFromURLResponse {
return PageBlobUploadPagesFromURLResponse{resp}
}
// --------------------------------------------------------------------------------------------------------------------- // ---------------------------------------------------------------------------------------------------------------------
// PageBlobClearPagesOptions provides set of configurations for PageBlobClient.ClearPages operation // ClearPagesOptions contains the optional parameters for the Client.ClearPages operation
type PageBlobClearPagesOptions struct { type ClearPagesOptions struct {
CpkInfo *CpkInfo CpkInfo *blob.CpkInfo
CpkScopeInfo *CpkScopeInfo CpkScopeInfo *blob.CpkScopeInfo
SequenceNumberAccessConditions *SequenceNumberAccessConditions SequenceNumberAccessConditions *SequenceNumberAccessConditions
BlobAccessConditions *BlobAccessConditions AccessConditions *blob.AccessConditions
} }
func (o *PageBlobClearPagesOptions) format() (*LeaseAccessConditions, *CpkInfo, func (o *ClearPagesOptions) format() (*generated.LeaseAccessConditions, *generated.CpkInfo,
*CpkScopeInfo, *SequenceNumberAccessConditions, *ModifiedAccessConditions) { *generated.CpkScopeInfo, *generated.SequenceNumberAccessConditions, *generated.ModifiedAccessConditions) {
if o == nil { if o == nil {
return nil, nil, nil, nil, nil return nil, nil, nil, nil, nil
} }
leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
return leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions return leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions
} }
// PageBlobClearPagesResponse contains the response from method PageBlobClient.ClearPages
type PageBlobClearPagesResponse struct {
pageBlobClientClearPagesResponse
}
func toPageBlobClearPagesResponse(resp pageBlobClientClearPagesResponse) PageBlobClearPagesResponse {
return PageBlobClearPagesResponse{resp}
}
// --------------------------------------------------------------------------------------------------------------------- // ---------------------------------------------------------------------------------------------------------------------
// PageBlobGetPageRangesOptions provides set of configurations for GetPageRanges operation // GetPageRangesOptions contains the optional parameters for the Client.NewGetPageRangesPager method.
type PageBlobGetPageRangesOptions struct { type GetPageRangesOptions struct {
Marker *string Marker *string
// Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
// greater than 5000, the server will return up to 5000 items. Note that if the // greater than 5000, the server will return up to 5000 items. Note that if the
@ -215,43 +194,34 @@ type PageBlobGetPageRangesOptions struct {
// specified by prevsnapshot is the older of the two. Note that incremental // specified by prevsnapshot is the older of the two. Note that incremental
// snapshots are currently supported only for blobs created on or after January 1, 2016. // snapshots are currently supported only for blobs created on or after January 1, 2016.
PrevSnapshot *string PrevSnapshot *string
// Optional, you can specify whether a particular range of the blob is read // Range specifies a range of bytes. The default value is all bytes.
PageRange *HttpRange Range blob.HTTPRange
// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
// information on working with blob snapshots, see Creating a Snapshot of a Blob. // information on working with blob snapshots, see Creating a Snapshot of a Blob.
// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
Snapshot *string Snapshot *string
BlobAccessConditions *BlobAccessConditions AccessConditions *blob.AccessConditions
} }
func (o *PageBlobGetPageRangesOptions) format() (*pageBlobClientGetPageRangesOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { func (o *GetPageRangesOptions) format() (*generated.PageBlobClientGetPageRangesOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
if o == nil { if o == nil {
return nil, nil, nil return nil, nil, nil
} }
leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
return &pageBlobClientGetPageRangesOptions{ return &generated.PageBlobClientGetPageRangesOptions{
Marker: o.Marker, Marker: o.Marker,
Maxresults: o.MaxResults, Maxresults: o.MaxResults,
Range: o.PageRange.format(), Range: exported.FormatHTTPRange(o.Range),
Snapshot: o.Snapshot, Snapshot: o.Snapshot,
}, leaseAccessConditions, modifiedAccessConditions }, leaseAccessConditions, modifiedAccessConditions
} }
// PageBlobGetPageRangesPager provides operations for iterating over paged responses
type PageBlobGetPageRangesPager struct {
*pageBlobClientGetPageRangesPager
}
func toPageBlobGetPageRangesPager(resp *pageBlobClientGetPageRangesPager) *PageBlobGetPageRangesPager {
return &PageBlobGetPageRangesPager{resp}
}
// --------------------------------------------------------------------------------------------------------------------- // ---------------------------------------------------------------------------------------------------------------------
// PageBlobGetPageRangesDiffOptions provides set of configurations for PageBlobClient.GetPageRangesDiff operation // GetPageRangesDiffOptions contains the optional parameters for the Client.NewGetPageRangesDiffPager method.
type PageBlobGetPageRangesDiffOptions struct { type GetPageRangesDiffOptions struct {
// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
// operation returns the NextMarker value within the response body if the listing // operation returns the NextMarker value within the response body if the listing
// operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
@ -274,115 +244,90 @@ type PageBlobGetPageRangesDiffOptions struct {
// specified by prevsnapshot is the older of the two. Note that incremental // specified by prevsnapshot is the older of the two. Note that incremental
// snapshots are currently supported only for blobs created on or after January 1, 2016. // snapshots are currently supported only for blobs created on or after January 1, 2016.
PrevSnapshot *string PrevSnapshot *string
// Optional, you can specify whether a particular range of the blob is read // Range specifies a range of bytes. The default value is all bytes.
PageRange *HttpRange Range blob.HTTPRange
// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
// information on working with blob snapshots, see Creating a Snapshot of a Blob. // information on working with blob snapshots, see Creating a Snapshot of a Blob.
// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
Snapshot *string Snapshot *string
BlobAccessConditions *BlobAccessConditions AccessConditions *blob.AccessConditions
} }
func (o *PageBlobGetPageRangesDiffOptions) format() (*pageBlobClientGetPageRangesDiffOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { func (o *GetPageRangesDiffOptions) format() (*generated.PageBlobClientGetPageRangesDiffOptions, *generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
if o == nil { if o == nil {
return nil, nil, nil return nil, nil, nil
} }
leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
return &pageBlobClientGetPageRangesDiffOptions{ return &generated.PageBlobClientGetPageRangesDiffOptions{
Marker: o.Marker, Marker: o.Marker,
Maxresults: o.MaxResults, Maxresults: o.MaxResults,
PrevSnapshotURL: o.PrevSnapshotURL, PrevSnapshotURL: o.PrevSnapshotURL,
Prevsnapshot: o.PrevSnapshot, Prevsnapshot: o.PrevSnapshot,
Range: o.PageRange.format(), Range: exported.FormatHTTPRange(o.Range),
Snapshot: o.Snapshot, Snapshot: o.Snapshot,
}, leaseAccessConditions, modifiedAccessConditions }, leaseAccessConditions, modifiedAccessConditions
} }
// PageBlobGetPageRangesDiffPager provides operations for iterating over paged responses
type PageBlobGetPageRangesDiffPager struct {
*pageBlobClientGetPageRangesDiffPager
}
func toPageBlobGetPageRangesDiffPager(resp *pageBlobClientGetPageRangesDiffPager) *PageBlobGetPageRangesDiffPager {
return &PageBlobGetPageRangesDiffPager{resp}
}
// --------------------------------------------------------------------------------------------------------------------- // ---------------------------------------------------------------------------------------------------------------------
// PageBlobResizeOptions provides set of configurations for PageBlobClient.Resize operation // ResizeOptions contains the optional parameters for the Client.Resize method.
type PageBlobResizeOptions struct { type ResizeOptions struct {
CpkInfo *CpkInfo CpkInfo *blob.CpkInfo
CpkScopeInfo *CpkScopeInfo CpkScopeInfo *blob.CpkScopeInfo
BlobAccessConditions *BlobAccessConditions AccessConditions *blob.AccessConditions
} }
func (o *PageBlobResizeOptions) format() (*pageBlobClientResizeOptions, *LeaseAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) { func (o *ResizeOptions) format() (*generated.PageBlobClientResizeOptions, *generated.LeaseAccessConditions,
*generated.CpkInfo, *generated.CpkScopeInfo, *generated.ModifiedAccessConditions) {
if o == nil { if o == nil {
return nil, nil, nil, nil, nil return nil, nil, nil, nil, nil
} }
leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
return nil, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions return nil, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions
} }
// PageBlobResizeResponse contains the response from method PageBlobClient.Resize
type PageBlobResizeResponse struct {
pageBlobClientResizeResponse
}
func toPageBlobResizeResponse(resp pageBlobClientResizeResponse) PageBlobResizeResponse {
return PageBlobResizeResponse{resp}
}
// --------------------------------------------------------------------------------------------------------------------- // ---------------------------------------------------------------------------------------------------------------------
// PageBlobUpdateSequenceNumberOptions provides set of configurations for PageBlobClient.UpdateSequenceNumber operation // UpdateSequenceNumberOptions contains the optional parameters for the Client.UpdateSequenceNumber method.
type PageBlobUpdateSequenceNumberOptions struct { type UpdateSequenceNumberOptions struct {
ActionType *SequenceNumberActionType ActionType *SequenceNumberActionType
BlobSequenceNumber *int64 SequenceNumber *int64
BlobAccessConditions *BlobAccessConditions AccessConditions *blob.AccessConditions
} }
func (o *PageBlobUpdateSequenceNumberOptions) format() (*SequenceNumberActionType, *pageBlobClientUpdateSequenceNumberOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { func (o *UpdateSequenceNumberOptions) format() (*generated.SequenceNumberActionType, *generated.PageBlobClientUpdateSequenceNumberOptions,
*generated.LeaseAccessConditions, *generated.ModifiedAccessConditions) {
if o == nil { if o == nil {
return nil, nil, nil, nil return nil, nil, nil, nil
} }
options := &pageBlobClientUpdateSequenceNumberOptions{ options := &generated.PageBlobClientUpdateSequenceNumberOptions{
BlobSequenceNumber: o.BlobSequenceNumber, BlobSequenceNumber: o.SequenceNumber,
} }
if *o.ActionType == SequenceNumberActionTypeIncrement { if *o.ActionType == SequenceNumberActionTypeIncrement {
options.BlobSequenceNumber = nil options.BlobSequenceNumber = nil
} }
leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.AccessConditions)
return o.ActionType, options, leaseAccessConditions, modifiedAccessConditions return o.ActionType, options, leaseAccessConditions, modifiedAccessConditions
} }
// PageBlobUpdateSequenceNumberResponse contains the response from method PageBlobClient.UpdateSequenceNumber
type PageBlobUpdateSequenceNumberResponse struct {
pageBlobClientUpdateSequenceNumberResponse
}
func toPageBlobUpdateSequenceNumberResponse(resp pageBlobClientUpdateSequenceNumberResponse) PageBlobUpdateSequenceNumberResponse {
return PageBlobUpdateSequenceNumberResponse{resp}
}
// --------------------------------------------------------------------------------------------------------------------- // ---------------------------------------------------------------------------------------------------------------------
// PageBlobCopyIncrementalOptions provides set of configurations for PageBlobClient.StartCopyIncremental operation // CopyIncrementalOptions contains the optional parameters for the Client.StartCopyIncremental method.
type PageBlobCopyIncrementalOptions struct { type CopyIncrementalOptions struct {
ModifiedAccessConditions *ModifiedAccessConditions ModifiedAccessConditions *blob.ModifiedAccessConditions
} }
func (o *PageBlobCopyIncrementalOptions) format() (*pageBlobClientCopyIncrementalOptions, *ModifiedAccessConditions) { func (o *CopyIncrementalOptions) format() (*generated.PageBlobClientCopyIncrementalOptions, *generated.ModifiedAccessConditions) {
if o == nil { if o == nil {
return nil, nil return nil, nil
} }
@ -390,13 +335,4 @@ func (o *PageBlobCopyIncrementalOptions) format() (*pageBlobClientCopyIncrementa
return nil, o.ModifiedAccessConditions return nil, o.ModifiedAccessConditions
} }
// PageBlobCopyIncrementalResponse contains the response from method PageBlobClient.StartCopyIncremental
type PageBlobCopyIncrementalResponse struct {
pageBlobClientCopyIncrementalResponse
}
func toPageBlobCopyIncrementalResponse(resp pageBlobClientCopyIncrementalResponse) PageBlobCopyIncrementalResponse {
return PageBlobCopyIncrementalResponse{resp}
}
// --------------------------------------------------------------------------------------------------------------------- // ---------------------------------------------------------------------------------------------------------------------

View file

@ -0,0 +1,38 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package pageblob
import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
// CreateResponse contains the response from method Client.Create.
type CreateResponse = generated.PageBlobClientCreateResponse

// UploadPagesResponse contains the response from method Client.UploadPages.
type UploadPagesResponse = generated.PageBlobClientUploadPagesResponse

// UploadPagesFromURLResponse contains the response from method Client.UploadPagesFromURL.
type UploadPagesFromURLResponse = generated.PageBlobClientUploadPagesFromURLResponse

// ClearPagesResponse contains the response from method Client.ClearPages.
type ClearPagesResponse = generated.PageBlobClientClearPagesResponse

// GetPageRangesResponse contains the response from method Client.NewGetPageRangesPager.
type GetPageRangesResponse = generated.PageBlobClientGetPageRangesResponse

// GetPageRangesDiffResponse contains the response from method Client.NewGetPageRangesDiffPager.
type GetPageRangesDiffResponse = generated.PageBlobClientGetPageRangesDiffResponse

// ResizeResponse contains the response from method Client.Resize.
type ResizeResponse = generated.PageBlobClientResizeResponse

// UpdateSequenceNumberResponse contains the response from method Client.UpdateSequenceNumber.
type UpdateSequenceNumberResponse = generated.PageBlobClientUpdateSequenceNumberResponse

// CopyIncrementalResponse contains the response from method Client.StartCopyIncremental.
type CopyIncrementalResponse = generated.PageBlobClientCopyIncrementalResponse

View file

@ -0,0 +1,51 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package azblob
import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
)
// CreateContainerResponse contains the response from method container.Client.Create.
type CreateContainerResponse = service.CreateContainerResponse

// DeleteContainerResponse contains the response from method container.Client.Delete.
type DeleteContainerResponse = service.DeleteContainerResponse

// DeleteBlobResponse contains the response from method blob.Client.Delete.
type DeleteBlobResponse = blob.DeleteResponse

// UploadResponse contains the response from method blockblob.Client.CommitBlockList.
type UploadResponse = blockblob.CommitBlockListResponse

// DownloadStreamResponse wraps AutoRest generated BlobDownloadResponse and helps to provide info for retry.
type DownloadStreamResponse = blob.DownloadStreamResponse

// ListBlobsFlatResponse contains the response from method container.Client.ListBlobFlatSegment.
type ListBlobsFlatResponse = container.ListBlobsFlatResponse

// ListContainersResponse contains the response from method service.Client.ListContainersSegment.
type ListContainersResponse = service.ListContainersResponse

// UploadBufferResponse contains the response from method Client.UploadBuffer/Client.UploadFile.
type UploadBufferResponse = blockblob.UploadBufferResponse

// UploadFileResponse contains the response from method Client.UploadBuffer/Client.UploadFile.
type UploadFileResponse = blockblob.UploadFileResponse

// UploadStreamResponse contains the response from method Client.CommitBlockList.
type UploadStreamResponse = blockblob.CommitBlockListResponse

// ListContainersSegmentResponse - An enumeration of containers.
type ListContainersSegmentResponse = generated.ListContainersSegmentResponse

// ListBlobsFlatSegmentResponse - An enumeration of blobs.
type ListBlobsFlatSegmentResponse = generated.ListBlobsFlatSegmentResponse

View file

@ -0,0 +1,316 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package sas
import (
"bytes"
"errors"
"fmt"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
)
// SharedKeyCredential contains an account's name and its primary or secondary key.
type SharedKeyCredential = exported.SharedKeyCredential
// UserDelegationCredential contains an account's name and its user delegation key.
type UserDelegationCredential = exported.UserDelegationCredential
// AccountSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account.
// Fill in the fields, then call SignWithSharedKey or SignWithUserDelegation to produce the SAS query parameters.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas
type AccountSignatureValues struct {
	Version       string    `param:"sv"`  // If not specified, this defaults to the package-level Version
	Protocol      Protocol  `param:"spr"` // See the Protocol* constants
	StartTime     time.Time `param:"st"`  // Not specified if IsZero
	ExpiryTime    time.Time `param:"se"`  // Not specified if IsZero
	Permissions   string    `param:"sp"`  // Create by initializing an AccountPermissions and then call String()
	IPRange       IPRange   `param:"sip"`
	Services      string    `param:"ss"`  // Create by initializing AccountServices and then call String()
	ResourceTypes string    `param:"srt"` // Create by initializing AccountResourceTypes and then call String()
}
// SignWithSharedKey uses an account's shared key credential to sign this signature values to produce
// the proper SAS query parameters.
// ExpiryTime, Permissions, Services, and ResourceTypes are all required; an error is returned if any is unset.
// The receiver is taken by value, so the normalization below does not mutate the caller's copy.
func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) {
	// https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
	if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" {
		return QueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType")
	}
	if v.Version == "" {
		// Default to the package-level SAS version when the caller did not pin one.
		v.Version = Version
	}

	// Round-trip the permissions through parse/String so they are validated and
	// emitted in the service-defined canonical order.
	perms, err := parseAccountPermissions(v.Permissions)
	if err != nil {
		return QueryParameters{}, err
	}
	v.Permissions = perms.String()

	startTime, expiryTime, _ := formatTimesForSigning(v.StartTime, v.ExpiryTime, time.Time{})

	// The field order of the string-to-sign is fixed by the service contract; do not reorder.
	stringToSign := strings.Join([]string{
		sharedKeyCredential.AccountName(),
		v.Permissions,
		v.Services,
		v.ResourceTypes,
		startTime,
		expiryTime,
		v.IPRange.String(),
		string(v.Protocol),
		v.Version,
		""}, // That is right, the account SAS requires a terminating extra newline
		"\n")

	signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign)
	if err != nil {
		return QueryParameters{}, err
	}

	p := QueryParameters{
		// Common SAS parameters
		version:     v.Version,
		protocol:    v.Protocol,
		startTime:   v.StartTime,
		expiryTime:  v.ExpiryTime,
		permissions: v.Permissions,
		ipRange:     v.IPRange,

		// Account-specific SAS parameters
		services:      v.Services,
		resourceTypes: v.ResourceTypes,

		// Calculated SAS signature
		signature: signature,
	}

	return p, nil
}
// SignWithUserDelegation uses an account's UserDelegationKey to sign this signature values to produce the proper SAS query parameters.
// ExpiryTime, Permissions, Services, and ResourceTypes are all required; an error is returned if any is unset.
// The receiver is taken by value, so the normalization below does not mutate the caller's copy.
func (v AccountSignatureValues) SignWithUserDelegation(userDelegationCredential *UserDelegationCredential) (QueryParameters, error) {
	// https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS
	if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" {
		return QueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType")
	}
	if v.Version == "" {
		// Default to the package-level SAS version when the caller did not pin one.
		v.Version = Version
	}

	// Round-trip the permissions through parse/String so they are validated and
	// emitted in the service-defined canonical order.
	perms, err := parseAccountPermissions(v.Permissions)
	if err != nil {
		return QueryParameters{}, err
	}
	v.Permissions = perms.String()

	startTime, expiryTime, _ := formatTimesForSigning(v.StartTime, v.ExpiryTime, time.Time{})

	// The field order of the string-to-sign is fixed by the service contract; do not reorder.
	stringToSign := strings.Join([]string{
		exported.GetAccountName(userDelegationCredential),
		v.Permissions,
		v.Services,
		v.ResourceTypes,
		startTime,
		expiryTime,
		v.IPRange.String(),
		string(v.Protocol),
		v.Version,
		""}, // That is right, the account SAS requires a terminating extra newline
		"\n")

	signature, err := exported.ComputeUDCHMACSHA256(userDelegationCredential, stringToSign)
	if err != nil {
		return QueryParameters{}, err
	}

	p := QueryParameters{
		// Common SAS parameters
		version:     v.Version,
		protocol:    v.Protocol,
		startTime:   v.StartTime,
		expiryTime:  v.ExpiryTime,
		permissions: v.Permissions,
		ipRange:     v.IPRange,

		// Account-specific SAS parameters
		services:      v.Services,
		resourceTypes: v.ResourceTypes,

		// Calculated SAS signature
		signature: signature,
	}

	udk := exported.GetUDKParams(userDelegationCredential)

	// User delegation SAS specific parameters.
	// NOTE(review): the fields below are dereferenced without nil checks; this
	// assumes the service always populates every UserDelegationKey field — confirm upstream.
	p.signedOID = *udk.SignedOID
	p.signedTID = *udk.SignedTID
	p.signedStart = *udk.SignedStart
	p.signedExpiry = *udk.SignedExpiry
	p.signedService = *udk.SignedService
	p.signedVersion = *udk.SignedVersion

	return p, nil
}
// AccountPermissions type simplifies creating the permissions string for an Azure Storage Account SAS.
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field.
type AccountPermissions struct {
	Read, Write, Delete, DeletePreviousVersion, List, Add, Create, Update, Process, Tag, FilterByTags bool
}

// String produces the SAS permissions string for an Azure Storage account.
// Call this method to set AccountSASSignatureValues's Permissions field.
// Characters are emitted in the service-defined order: r w d x l a c u p t f.
func (p *AccountPermissions) String() string {
	var buffer bytes.Buffer
	if p.Read {
		buffer.WriteRune('r')
	}
	if p.Write {
		buffer.WriteRune('w')
	}
	if p.Delete {
		buffer.WriteRune('d')
	}
	if p.DeletePreviousVersion {
		buffer.WriteRune('x')
	}
	if p.List {
		buffer.WriteRune('l')
	}
	if p.Add {
		buffer.WriteRune('a')
	}
	if p.Create {
		buffer.WriteRune('c')
	}
	if p.Update {
		buffer.WriteRune('u')
	}
	if p.Process {
		buffer.WriteRune('p')
	}
	if p.Tag {
		buffer.WriteRune('t')
	}
	if p.FilterByTags {
		buffer.WriteRune('f')
	}
	return buffer.String()
}

// parseAccountPermissions initializes the AccountPermissions' fields from a string.
// It is the inverse of String: each character enables one permission flag, and an
// unrecognized character yields an error.
func parseAccountPermissions(s string) (AccountPermissions, error) {
	p := AccountPermissions{} // Clear out the flags
	for _, r := range s {
		switch r {
		case 'r':
			p.Read = true
		case 'w':
			p.Write = true
		case 'd':
			p.Delete = true
		case 'x':
			// 'x' is what String emits for DeletePreviousVersion; the previous code
			// wrongly mapped it to Process (duplicating 'p'), which broke round-tripping
			// and made DeletePreviousVersion impossible to parse.
			p.DeletePreviousVersion = true
		case 'l':
			p.List = true
		case 'a':
			p.Add = true
		case 'c':
			p.Create = true
		case 'u':
			p.Update = true
		case 'p':
			p.Process = true
		case 't':
			p.Tag = true
		case 'f':
			p.FilterByTags = true
		default:
			return AccountPermissions{}, fmt.Errorf("invalid permission character: '%v'", r)
		}
	}
	return p, nil
}
// AccountServices type simplifies creating the services string for an Azure Storage Account SAS.
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field.
type AccountServices struct {
	Blob, Queue, File bool
}

// String produces the SAS services string for an Azure Storage account: "b", "q",
// and/or "f", in that fixed order, one character per enabled service.
// Call this method to set AccountSASSignatureValues's Services field.
func (s *AccountServices) String() string {
	flags := []struct {
		enabled bool
		code    byte
	}{
		{s.Blob, 'b'},
		{s.Queue, 'q'},
		{s.File, 'f'},
	}
	var sb strings.Builder
	for _, f := range flags {
		if f.enabled {
			sb.WriteByte(f.code)
		}
	}
	return sb.String()
}
// Parse initializes the AccountSASServices' fields from a string.
/*func parseAccountServices(str string) (AccountServices, error) {
s := AccountServices{} // Clear out the flags
for _, r := range str {
switch r {
case 'b':
s.Blob = true
case 'q':
s.Queue = true
case 'f':
s.File = true
default:
return AccountServices{}, fmt.Errorf("invalid service character: '%v'", r)
}
}
return s, nil
}*/
// AccountResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS.
// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field.
type AccountResourceTypes struct {
	Service, Container, Object bool
}

// String produces the SAS resource types string for an Azure Storage account:
// "s", "c", and/or "o", in that fixed order, one character per enabled resource type.
// Call this method to set AccountSASSignatureValues's ResourceTypes field.
func (rt *AccountResourceTypes) String() string {
	out := make([]byte, 0, 3)
	if rt.Service {
		out = append(out, 's')
	}
	if rt.Container {
		out = append(out, 'c')
	}
	if rt.Object {
		out = append(out, 'o')
	}
	return string(out)
}
// Parse initializes the AccountResourceTypes's fields from a string.
/*func parseAccountResourceTypes(s string) (AccountResourceTypes, error) {
rt := AccountResourceTypes{} // Clear out the flags
for _, r := range s {
switch r {
case 's':
rt.Service = true
case 'c':
rt.Container = true
case 'o':
rt.Object = true
default:
return AccountResourceTypes{}, fmt.Errorf("invalid resource type: '%v'", r)
}
}
return rt, nil
}*/

View file

@ -0,0 +1,440 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package sas
import (
"errors"
"net"
"net/url"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
)
// TimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
const (
	TimeFormat = "2006-01-02T15:04:05Z" // "2017-07-27T00:00:00Z" // ISO 8601
)

var (
	// Version is the default version encoded in the SAS token.
	Version = "2019-12-12"
)

// timeFormats lists the accepted ISO 8601 layouts for parsing SAS times, tried in order.
// Please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details.
var timeFormats = []string{"2006-01-02T15:04:05.0000000Z", TimeFormat, "2006-01-02T15:04Z", "2006-01-02"}

// Protocol indicates the http/https scheme(s) permitted for a SAS.
type Protocol string

const (
	// ProtocolHTTPS can be specified for a SAS protocol.
	ProtocolHTTPS Protocol = "https"

	// ProtocolHTTPSandHTTP can be specified for a SAS protocol.
	ProtocolHTTPSandHTTP Protocol = "https,http"
)
// formatTimesForSigning converts start, expiry, and snapshot times to strings suitable
// for inclusion in a string-to-sign. Each result is "" when the corresponding time IsZero.
// Start and expiry use the default SAS layout; the snapshot time uses the layout from
// exported.SnapshotTimeFormat.
func formatTimesForSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) {
	ss := ""
	if !startTime.IsZero() {
		ss = formatTimeWithDefaultFormat(&startTime)
	}
	se := ""
	if !expiryTime.IsZero() {
		se = formatTimeWithDefaultFormat(&expiryTime)
	}
	sh := ""
	if !snapshotTime.IsZero() {
		sh = snapshotTime.Format(exported.SnapshotTimeFormat)
	}
	return ss, se, sh
}
// formatTimeWithDefaultFormat formats t using the default ISO 8601 layout
// "yyyy-MM-ddTHH:mm:ssZ".
func formatTimeWithDefaultFormat(t *time.Time) string {
	return formatTime(t, TimeFormat)
}

// formatTime formats t with the given layout; an empty layout falls back to the
// default ISO 8601 layout "yyyy-MM-ddTHH:mm:ssZ".
func formatTime(t *time.Time, format string) string {
	if format == "" {
		format = TimeFormat
	}
	return t.Format(format)
}
// parseTime attempts to parse a SAS time string against each supported ISO 8601
// layout in timeFormats, returning the parsed time and the layout that matched.
// When no layout matches, err is a descriptive error and t/timeFormat are zero values.
func parseTime(val string) (t time.Time, timeFormat string, err error) {
	for _, sasTimeFormat := range timeFormats {
		t, err = time.Parse(sasTimeFormat, val)
		if err == nil {
			timeFormat = sasTimeFormat
			break
		}
	}
	if err != nil {
		// Fixed typo in the error message: "IOS 8601" -> "ISO 8601".
		err = errors.New("fail to parse time with ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details")
	}
	return
}
// IPRange represents a SAS IP range's start IP and (optionally) end IP.
type IPRange struct {
Start net.IP // Not specified if length = 0
End net.IP // Not specified if length = 0
}
// String returns a string representation of an IPRange.
func (ipr *IPRange) String() string {
if len(ipr.Start) == 0 {
return ""
}
start := ipr.Start.String()
if len(ipr.End) == 0 {
return start
}
return start + "-" + ipr.End.String()
}
// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas

// QueryParameters object represents the components that make up an Azure Storage SAS' query parameters.
// You parse a map of query parameters into its fields by calling NewQueryParameters(). You add the components
// to a query parameter map by calling AddToValues().
// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type.
// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues).
// The `param` tag on each field names the corresponding SAS query-string parameter.
type QueryParameters struct {
	// All members are immutable or values so copies of this struct are goroutine-safe.
	version                    string    `param:"sv"`
	services                   string    `param:"ss"`
	resourceTypes              string    `param:"srt"`
	protocol                   Protocol  `param:"spr"`
	startTime                  time.Time `param:"st"`
	expiryTime                 time.Time `param:"se"`
	snapshotTime               time.Time `param:"snapshot"`
	ipRange                    IPRange   `param:"sip"`
	identifier                 string    `param:"si"`
	resource                   string    `param:"sr"`
	permissions                string    `param:"sp"`
	signature                  string    `param:"sig"`
	cacheControl               string    `param:"rscc"`
	contentDisposition         string    `param:"rscd"`
	contentEncoding            string    `param:"rsce"`
	contentLanguage            string    `param:"rscl"`
	contentType                string    `param:"rsct"`
	signedOID                  string    `param:"skoid"`
	signedTID                  string    `param:"sktid"`
	signedStart                time.Time `param:"skt"`
	signedService              string    `param:"sks"`
	signedExpiry               time.Time `param:"ske"`
	signedVersion              string    `param:"skv"`
	signedDirectoryDepth       string    `param:"sdd"`
	preauthorizedAgentObjectID string    `param:"saoid"`
	agentObjectID              string    `param:"suoid"`
	correlationID              string    `param:"scid"`
	// private members used for startTime and expiryTime formatting, so Encode can
	// reproduce the exact layout the values were originally parsed with.
	stTimeFormat string
	seTimeFormat string
}
// PreauthorizedAgentObjectID returns preauthorizedAgentObjectID (the "saoid" parameter).
func (p *QueryParameters) PreauthorizedAgentObjectID() string {
	return p.preauthorizedAgentObjectID
}

// AgentObjectID returns agentObjectID (the "suoid" parameter).
func (p *QueryParameters) AgentObjectID() string {
	return p.agentObjectID
}

// SignedCorrelationID returns correlationID (the "scid" parameter).
func (p *QueryParameters) SignedCorrelationID() string {
	return p.correlationID
}

// SignedOID returns signedOID (the "skoid" parameter).
func (p *QueryParameters) SignedOID() string {
	return p.signedOID
}

// SignedTID returns signedTID (the "sktid" parameter).
func (p *QueryParameters) SignedTID() string {
	return p.signedTID
}

// SignedStart returns signedStart (the "skt" parameter).
func (p *QueryParameters) SignedStart() time.Time {
	return p.signedStart
}

// SignedExpiry returns signedExpiry (the "ske" parameter).
func (p *QueryParameters) SignedExpiry() time.Time {
	return p.signedExpiry
}

// SignedService returns signedService (the "sks" parameter).
func (p *QueryParameters) SignedService() string {
	return p.signedService
}

// SignedVersion returns signedVersion (the "skv" parameter).
func (p *QueryParameters) SignedVersion() string {
	return p.signedVersion
}

// SnapshotTime returns snapshotTime (the "snapshot" parameter).
func (p *QueryParameters) SnapshotTime() time.Time {
	return p.snapshotTime
}

// Version returns version (the "sv" parameter).
func (p *QueryParameters) Version() string {
	return p.version
}

// Services returns services (the "ss" parameter).
func (p *QueryParameters) Services() string {
	return p.services
}

// ResourceTypes returns resourceTypes (the "srt" parameter).
func (p *QueryParameters) ResourceTypes() string {
	return p.resourceTypes
}

// Protocol returns protocol (the "spr" parameter).
func (p *QueryParameters) Protocol() Protocol {
	return p.protocol
}

// StartTime returns startTime (the "st" parameter).
func (p *QueryParameters) StartTime() time.Time {
	return p.startTime
}

// ExpiryTime returns expiryTime (the "se" parameter).
func (p *QueryParameters) ExpiryTime() time.Time {
	return p.expiryTime
}

// IPRange returns ipRange (the "sip" parameter).
func (p *QueryParameters) IPRange() IPRange {
	return p.ipRange
}

// Identifier returns identifier (the "si" parameter).
func (p *QueryParameters) Identifier() string {
	return p.identifier
}

// Resource returns resource (the "sr" parameter).
func (p *QueryParameters) Resource() string {
	return p.resource
}

// Permissions returns permissions (the "sp" parameter).
func (p *QueryParameters) Permissions() string {
	return p.permissions
}

// Signature returns signature (the "sig" parameter).
func (p *QueryParameters) Signature() string {
	return p.signature
}

// CacheControl returns cacheControl (the "rscc" parameter).
func (p *QueryParameters) CacheControl() string {
	return p.cacheControl
}

// ContentDisposition returns contentDisposition (the "rscd" parameter).
func (p *QueryParameters) ContentDisposition() string {
	return p.contentDisposition
}

// ContentEncoding returns contentEncoding (the "rsce" parameter).
func (p *QueryParameters) ContentEncoding() string {
	return p.contentEncoding
}

// ContentLanguage returns contentLanguage (the "rscl" parameter).
func (p *QueryParameters) ContentLanguage() string {
	return p.contentLanguage
}

// ContentType returns contentType (the "rsct" parameter).
func (p *QueryParameters) ContentType() string {
	return p.contentType
}

// SignedDirectoryDepth returns signedDirectoryDepth (the "sdd" parameter).
func (p *QueryParameters) SignedDirectoryDepth() string {
	return p.signedDirectoryDepth
}
// Encode encodes the SAS query parameters into URL encoded form sorted by key.
func (p *QueryParameters) Encode() string {
	v := url.Values{}

	// addNonEmpty registers a query parameter only when it carries a value,
	// mirroring the SAS convention of omitting unset fields.
	addNonEmpty := func(key, value string) {
		if value != "" {
			v.Add(key, value)
		}
	}

	addNonEmpty("sv", p.version)
	addNonEmpty("ss", p.services)
	addNonEmpty("srt", p.resourceTypes)
	addNonEmpty("spr", string(p.protocol))
	if !p.startTime.IsZero() {
		// Times round-trip using the same layout they were parsed with.
		v.Add("st", formatTime(&(p.startTime), p.stTimeFormat))
	}
	if !p.expiryTime.IsZero() {
		v.Add("se", formatTime(&(p.expiryTime), p.seTimeFormat))
	}
	if len(p.ipRange.Start) > 0 {
		v.Add("sip", p.ipRange.String())
	}
	addNonEmpty("si", p.identifier)
	addNonEmpty("sr", p.resource)
	addNonEmpty("sp", p.permissions)
	if p.signedOID != "" {
		// The user-delegation parameters travel as a group, keyed off signedOID.
		v.Add("skoid", p.signedOID)
		v.Add("sktid", p.signedTID)
		v.Add("skt", p.signedStart.Format(TimeFormat))
		v.Add("ske", p.signedExpiry.Format(TimeFormat))
		v.Add("sks", p.signedService)
		v.Add("skv", p.signedVersion)
	}
	addNonEmpty("sig", p.signature)
	addNonEmpty("rscc", p.cacheControl)
	addNonEmpty("rscd", p.contentDisposition)
	addNonEmpty("rsce", p.contentEncoding)
	addNonEmpty("rscl", p.contentLanguage)
	addNonEmpty("rsct", p.contentType)
	addNonEmpty("sdd", p.signedDirectoryDepth)
	addNonEmpty("saoid", p.preauthorizedAgentObjectID)
	addNonEmpty("suoid", p.agentObjectID)
	addNonEmpty("scid", p.correlationID)

	// url.Values.Encode sorts by key, so insertion order above is irrelevant.
	return v.Encode()
}
// NewQueryParameters creates and initializes a QueryParameters object based on the
// query parameter map's passed-in values. If deleteSASParametersFromValues is true,
// all SAS-related query parameters are removed from the passed-in map. If
// deleteSASParametersFromValues is false, the map passed-in map is unaltered.
func NewQueryParameters(values url.Values, deleteSASParametersFromValues bool) QueryParameters {
	p := QueryParameters{}
	for k, v := range values {
		// Only the first value for each key is considered; SAS parameters are
		// single-valued by definition.
		val := v[0]
		isSASKey := true
		// Keys are matched case-insensitively against the documented SAS parameter names.
		switch strings.ToLower(k) {
		case "sv":
			p.version = val
		case "ss":
			p.services = val
		case "srt":
			p.resourceTypes = val
		case "spr":
			p.protocol = Protocol(val)
		case "snapshot":
			// Parse errors are deliberately ignored; an unparseable time leaves
			// the zero value in place.
			p.snapshotTime, _ = time.Parse(exported.SnapshotTimeFormat, val)
		case "st":
			// The layout used to parse is remembered so Encode can reproduce
			// the original formatting.
			p.startTime, p.stTimeFormat, _ = parseTime(val)
		case "se":
			p.expiryTime, p.seTimeFormat, _ = parseTime(val)
		case "sip":
			// "sip" is either a single IP or a "start-end" range.
			dashIndex := strings.Index(val, "-")
			if dashIndex == -1 {
				p.ipRange.Start = net.ParseIP(val)
			} else {
				p.ipRange.Start = net.ParseIP(val[:dashIndex])
				p.ipRange.End = net.ParseIP(val[dashIndex+1:])
			}
		case "si":
			p.identifier = val
		case "sr":
			p.resource = val
		case "sp":
			p.permissions = val
		case "sig":
			p.signature = val
		case "rscc":
			p.cacheControl = val
		case "rscd":
			p.contentDisposition = val
		case "rsce":
			p.contentEncoding = val
		case "rscl":
			p.contentLanguage = val
		case "rsct":
			p.contentType = val
		case "skoid":
			p.signedOID = val
		case "sktid":
			p.signedTID = val
		case "skt":
			p.signedStart, _ = time.Parse(TimeFormat, val)
		case "ske":
			p.signedExpiry, _ = time.Parse(TimeFormat, val)
		case "sks":
			p.signedService = val
		case "skv":
			p.signedVersion = val
		case "sdd":
			p.signedDirectoryDepth = val
		case "saoid":
			p.preauthorizedAgentObjectID = val
		case "suoid":
			p.agentObjectID = val
		case "scid":
			p.correlationID = val
		default:
			isSASKey = false // We didn't recognize the query parameter
		}
		// Deleting the original-cased key during range iteration is safe in Go:
		// deletion of the current key does not disturb the remaining iteration.
		if isSASKey && deleteSASParametersFromValues {
			delete(values, k)
		}
	}
	return p
}

View file

@ -4,22 +4,24 @@
// Copyright (c) Microsoft Corporation. All rights reserved. // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. // Licensed under the MIT License.
package azblob package sas
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"strings" "strings"
"time" "time"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
) )
// BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob. // BlobSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas // For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas
type BlobSASSignatureValues struct { type BlobSignatureValues struct {
Version string `param:"sv"` // If not specified, this defaults to SASVersion Version string `param:"sv"` // If not specified, this defaults to Version
Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants Protocol Protocol `param:"spr"` // See the Protocol* constants
StartTime time.Time `param:"st"` // Not specified if IsZero StartTime time.Time `param:"st"` // Not specified if IsZero
ExpiryTime time.Time `param:"se"` // Not specified if IsZero ExpiryTime time.Time `param:"se"` // Not specified if IsZero
SnapshotTime time.Time SnapshotTime time.Time
Permissions string `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String() Permissions string `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String()
IPRange IPRange `param:"sip"` IPRange IPRange `param:"sip"`
@ -45,81 +47,40 @@ func getDirectoryDepth(path string) string {
return fmt.Sprint(strings.Count(path, "/") + 1) return fmt.Sprint(strings.Count(path, "/") + 1)
} }
// NewSASQueryParameters uses an account's StorageAccountCredential to sign this signature values to produce // SignWithSharedKey uses an account's SharedKeyCredential to sign this signature values to produce the proper SAS query parameters.
// the proper SAS query parameters. func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) {
// See: StorageAccountCredential. Compatible with both UserDelegationCredential and SharedKeyCredential
func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) {
resource := "c"
if sharedKeyCredential == nil { if sharedKeyCredential == nil {
return SASQueryParameters{}, fmt.Errorf("cannot sign SAS query without Shared Key Credential") return QueryParameters{}, fmt.Errorf("cannot sign SAS query without Shared Key Credential")
} }
//Make sure the permission characters are in the correct order
perms, err := parseBlobPermissions(v.Permissions)
if err != nil {
return QueryParameters{}, err
}
v.Permissions = perms.String()
resource := "c"
if !v.SnapshotTime.IsZero() { if !v.SnapshotTime.IsZero() {
resource = "bs" resource = "bs"
//Make sure the permission characters are in the correct order
perms := &BlobSASPermissions{}
if err := perms.Parse(v.Permissions); err != nil {
return SASQueryParameters{}, err
}
v.Permissions = perms.String()
} else if v.BlobVersion != "" { } else if v.BlobVersion != "" {
resource = "bv" resource = "bv"
//Make sure the permission characters are in the correct order
perms := &BlobSASPermissions{}
if err := perms.Parse(v.Permissions); err != nil {
return SASQueryParameters{}, err
}
v.Permissions = perms.String()
} else if v.Directory != "" { } else if v.Directory != "" {
resource = "d" resource = "d"
v.BlobName = "" v.BlobName = ""
perms := &BlobSASPermissions{}
if err := perms.Parse(v.Permissions); err != nil {
return SASQueryParameters{}, err
}
v.Permissions = perms.String()
} else if v.BlobName == "" { } else if v.BlobName == "" {
// Make sure the permission characters are in the correct order // do nothing
perms := &ContainerSASPermissions{}
if err := perms.Parse(v.Permissions); err != nil {
return SASQueryParameters{}, err
}
v.Permissions = perms.String()
} else { } else {
resource = "b" resource = "b"
// Make sure the permission characters are in the correct order
perms := &BlobSASPermissions{}
if err := perms.Parse(v.Permissions); err != nil {
return SASQueryParameters{}, err
}
v.Permissions = perms.String()
} }
if v.Version == "" { if v.Version == "" {
v.Version = SASVersion v.Version = Version
} }
startTime, expiryTime, snapshotTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime) startTime, expiryTime, snapshotTime := formatTimesForSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime)
signedIdentifier := v.Identifier signedIdentifier := v.Identifier
//udk := sharedKeyCredential.getUDKParams()
//
//if udk != nil {
// udkStart, udkExpiry, _ := FormatTimesForSASSigning(udk.SignedStart, udk.SignedExpiry, time.Time{})
// //I don't like this answer to combining the functions
// //But because signedIdentifier and the user delegation key strings share a place, this is an _OK_ way to do it.
// signedIdentifier = strings.Join([]string{
// udk.SignedOid,
// udk.SignedTid,
// udkStart,
// udkExpiry,
// udk.SignedService,
// udk.SignedVersion,
// v.PreauthorizedAgentObjectId,
// v.AgentObjectId,
// v.CorrelationId,
// }, "\n")
//}
// String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx // String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
stringToSign := strings.Join([]string{ stringToSign := strings.Join([]string{
v.Permissions, v.Permissions,
@ -139,13 +100,12 @@ func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *Share
v.ContentType}, // rsct v.ContentType}, // rsct
"\n") "\n")
signature := "" signature, err := exported.ComputeHMACSHA256(sharedKeyCredential, stringToSign)
signature, err := sharedKeyCredential.ComputeHMACSHA256(stringToSign)
if err != nil { if err != nil {
return SASQueryParameters{}, err return QueryParameters{}, err
} }
p := SASQueryParameters{ p := QueryParameters{
// Common SAS parameters // Common SAS parameters
version: v.Version, version: v.Version,
protocol: v.Protocol, protocol: v.Protocol,
@ -164,22 +124,122 @@ func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *Share
contentType: v.ContentType, contentType: v.ContentType,
snapshotTime: v.SnapshotTime, snapshotTime: v.SnapshotTime,
signedDirectoryDepth: getDirectoryDepth(v.Directory), signedDirectoryDepth: getDirectoryDepth(v.Directory),
preauthorizedAgentObjectId: v.PreauthorizedAgentObjectId, preauthorizedAgentObjectID: v.PreauthorizedAgentObjectId,
agentObjectId: v.AgentObjectId, agentObjectID: v.AgentObjectId,
correlationId: v.CorrelationId, correlationID: v.CorrelationId,
// Calculated SAS signature // Calculated SAS signature
signature: signature, signature: signature,
} }
////User delegation SAS specific parameters return p, nil
//if udk != nil { }
// p.signedOid = udk.SignedOid
// p.signedTid = udk.SignedTid // SignWithUserDelegation uses an account's UserDelegationCredential to sign this signature values to produce the proper SAS query parameters.
// p.signedStart = udk.SignedStart func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *UserDelegationCredential) (QueryParameters, error) {
// p.signedExpiry = udk.SignedExpiry if userDelegationCredential == nil {
// p.signedService = udk.SignedService return QueryParameters{}, fmt.Errorf("cannot sign SAS query without User Delegation Key")
// p.signedVersion = udk.SignedVersion }
//}
//Make sure the permission characters are in the correct order
perms, err := parseBlobPermissions(v.Permissions)
if err != nil {
return QueryParameters{}, err
}
v.Permissions = perms.String()
resource := "c"
if !v.SnapshotTime.IsZero() {
resource = "bs"
} else if v.BlobVersion != "" {
resource = "bv"
} else if v.Directory != "" {
resource = "d"
v.BlobName = ""
} else if v.BlobName == "" {
// do nothing
} else {
resource = "b"
}
if v.Version == "" {
v.Version = Version
}
startTime, expiryTime, snapshotTime := formatTimesForSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime)
udk := exported.GetUDKParams(userDelegationCredential)
udkStart, udkExpiry, _ := formatTimesForSigning(*udk.SignedStart, *udk.SignedExpiry, time.Time{})
//I don't like this answer to combining the functions
//But because signedIdentifier and the user delegation key strings share a place, this is an _OK_ way to do it.
signedIdentifier := strings.Join([]string{
*udk.SignedOID,
*udk.SignedTID,
udkStart,
udkExpiry,
*udk.SignedService,
*udk.SignedVersion,
v.PreauthorizedAgentObjectId,
v.AgentObjectId,
v.CorrelationId,
}, "\n")
// String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
stringToSign := strings.Join([]string{
v.Permissions,
startTime,
expiryTime,
getCanonicalName(exported.GetAccountName(userDelegationCredential), v.ContainerName, v.BlobName, v.Directory),
signedIdentifier,
v.IPRange.String(),
string(v.Protocol),
v.Version,
resource,
snapshotTime, // signed timestamp
v.CacheControl, // rscc
v.ContentDisposition, // rscd
v.ContentEncoding, // rsce
v.ContentLanguage, // rscl
v.ContentType}, // rsct
"\n")
signature, err := exported.ComputeUDCHMACSHA256(userDelegationCredential, stringToSign)
if err != nil {
return QueryParameters{}, err
}
p := QueryParameters{
// Common SAS parameters
version: v.Version,
protocol: v.Protocol,
startTime: v.StartTime,
expiryTime: v.ExpiryTime,
permissions: v.Permissions,
ipRange: v.IPRange,
// Container/Blob-specific SAS parameters
resource: resource,
identifier: v.Identifier,
cacheControl: v.CacheControl,
contentDisposition: v.ContentDisposition,
contentEncoding: v.ContentEncoding,
contentLanguage: v.ContentLanguage,
contentType: v.ContentType,
snapshotTime: v.SnapshotTime,
signedDirectoryDepth: getDirectoryDepth(v.Directory),
preauthorizedAgentObjectID: v.PreauthorizedAgentObjectId,
agentObjectID: v.AgentObjectId,
correlationID: v.CorrelationId,
// Calculated SAS signature
signature: signature,
}
//User delegation SAS specific parameters
p.signedOID = *udk.SignedOID
p.signedTID = *udk.SignedTID
p.signedStart = *udk.SignedStart
p.signedExpiry = *udk.SignedExpiry
p.signedService = *udk.SignedService
p.signedVersion = *udk.SignedVersion
return p, nil return p, nil
} }
@ -197,17 +257,17 @@ func getCanonicalName(account string, containerName string, blobName string, dir
return strings.Join(elements, "") return strings.Join(elements, "")
} }
// ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS. // ContainerPermissions type simplifies creating the permissions string for an Azure Storage container SAS.
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field. // Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
// All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-directory-container-or-blob // All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-directory-container-or-blob
type ContainerSASPermissions struct { type ContainerPermissions struct {
Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag bool Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag bool
Execute, ModifyOwnership, ModifyPermissions bool // Hierarchical Namespace only Execute, ModifyOwnership, ModifyPermissions bool // Hierarchical Namespace only
} }
// String produces the SAS permissions string for an Azure Storage container. // String produces the SAS permissions string for an Azure Storage container.
// Call this method to set BlobSASSignatureValues's Permissions field. // Call this method to set BlobSASSignatureValues's Permissions field.
func (p ContainerSASPermissions) String() string { func (p *ContainerPermissions) String() string {
var b bytes.Buffer var b bytes.Buffer
if p.Read { if p.Read {
b.WriteRune('r') b.WriteRune('r')
@ -245,9 +305,9 @@ func (p ContainerSASPermissions) String() string {
return b.String() return b.String()
} }
// Parse initializes the ContainerSASPermissions's fields from a string. // Parse initializes the ContainerSASPermissions' fields from a string.
func (p *ContainerSASPermissions) Parse(s string) error { /*func parseContainerPermissions(s string) (ContainerPermissions, error) {
*p = ContainerSASPermissions{} // Clear the flags p := ContainerPermissions{} // Clear the flags
for _, r := range s { for _, r := range s {
switch r { switch r {
case 'r': case 'r':
@ -273,21 +333,21 @@ func (p *ContainerSASPermissions) Parse(s string) error {
case 'p': case 'p':
p.ModifyPermissions = true p.ModifyPermissions = true
default: default:
return fmt.Errorf("invalid permission: '%v'", r) return ContainerPermissions{}, fmt.Errorf("invalid permission: '%v'", r)
} }
} }
return nil return p, nil
} }*/
// BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS. // BlobPermissions type simplifies creating the permissions string for an Azure Storage blob SAS.
// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field. // Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field.
type BlobSASPermissions struct { type BlobPermissions struct {
Read, Add, Create, Write, Delete, DeletePreviousVersion, Tag, List, Move, Execute, Ownership, Permissions bool Read, Add, Create, Write, Delete, DeletePreviousVersion, Tag, List, Move, Execute, Ownership, Permissions bool
} }
// String produces the SAS permissions string for an Azure Storage blob. // String produces the SAS permissions string for an Azure Storage blob.
// Call this method to set BlobSASSignatureValues's Permissions field. // Call this method to set BlobSignatureValues's Permissions field.
func (p BlobSASPermissions) String() string { func (p *BlobPermissions) String() string {
var b bytes.Buffer var b bytes.Buffer
if p.Read { if p.Read {
b.WriteRune('r') b.WriteRune('r')
@ -329,8 +389,8 @@ func (p BlobSASPermissions) String() string {
} }
// Parse initializes the BlobSASPermissions's fields from a string. // Parse initializes the BlobSASPermissions's fields from a string.
func (p *BlobSASPermissions) Parse(s string) error { func parseBlobPermissions(s string) (BlobPermissions, error) {
*p = BlobSASPermissions{} // Clear the flags p := BlobPermissions{} // Clear the flags
for _, r := range s { for _, r := range s {
switch r { switch r {
case 'r': case 'r':
@ -358,8 +418,8 @@ func (p *BlobSASPermissions) Parse(s string) error {
case 'p': case 'p':
p.Permissions = true p.Permissions = true
default: default:
return fmt.Errorf("invalid permission: '%v'", r) return BlobPermissions{}, fmt.Errorf("invalid permission: '%v'", r)
} }
} }
return nil return p, nil
} }

View file

@ -2,71 +2,52 @@
// +build go1.18 // +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved. // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. // Licensed under the MIT License. See License.txt in the project root for license information.
package azblob package sas
import ( import (
"net"
"net/url" "net/url"
"strings" "strings"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
) )
const ( const (
snapshot = "snapshot" snapshot = "snapshot"
versionId = "versionid" versionId = "versionid"
SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00"
) )
// BlobURLParts object represents the components that make up an Azure Storage Container/Blob URL. You parse an
// existing URL into its parts by calling NewBlobURLParts(). You construct a URL from parts by calling URL().
// NOTE: Changing any SAS-related field requires computing a new SAS signature.
type BlobURLParts struct {
Scheme string // Ex: "https://"
Host string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80"
IPEndpointStyleInfo IPEndpointStyleInfo
ContainerName string // "" if no container
BlobName string // "" if no blob
Snapshot string // "" if not a snapshot
SAS SASQueryParameters
UnparsedParams string
VersionID string // "" if not versioning enabled
}
// IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator. // IPEndpointStyleInfo is used for IP endpoint style URL when working with Azure storage emulator.
// Ex: "https://10.132.141.33/accountname/containername" // Ex: "https://10.132.141.33/accountname/containername"
type IPEndpointStyleInfo struct { type IPEndpointStyleInfo struct {
AccountName string // "" if not using IP endpoint style AccountName string // "" if not using IP endpoint style
} }
// isIPEndpointStyle checkes if URL's host is IP, in this case the storage account endpoint will be composed as: // URLParts object represents the components that make up an Azure Storage Container/Blob URL.
// http(s)://IP(:port)/storageaccount/container/... // NOTE: Changing any SAS-related field requires computing a new SAS signature.
// As url's Host property, host could be both host or host:port type URLParts struct {
func isIPEndpointStyle(host string) bool { Scheme string // Ex: "https://"
if host == "" { Host string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80"
return false IPEndpointStyleInfo IPEndpointStyleInfo
} ContainerName string // "" if no container
if h, _, err := net.SplitHostPort(host); err == nil { BlobName string // "" if no blob
host = h Snapshot string // "" if not a snapshot
} SAS QueryParameters
// For IPv6, there could be case where SplitHostPort fails for cannot finding port. UnparsedParams string
// In this case, eliminate the '[' and ']' in the URL. VersionID string // "" if not versioning enabled
// For details about IPv6 URL, please refer to https://tools.ietf.org/html/rfc2732
if host[0] == '[' && host[len(host)-1] == ']' {
host = host[1 : len(host)-1]
}
return net.ParseIP(host) != nil
} }
// NewBlobURLParts parses a URL initializing BlobURLParts' fields including any SAS-related & snapshot query parameters. Any other // ParseURL parses a URL initializing URLParts' fields including any SAS-related & snapshot query parameters.
// query parameters remain in the UnparsedParams field. This method overwrites all fields in the BlobURLParts object. // Any other query parameters remain in the UnparsedParams field.
func NewBlobURLParts(u string) (BlobURLParts, error) { func ParseURL(u string) (URLParts, error) {
uri, err := url.Parse(u) uri, err := url.Parse(u)
if err != nil { if err != nil {
return BlobURLParts{}, err return URLParts{}, err
} }
up := BlobURLParts{ up := URLParts{
Scheme: uri.Scheme, Scheme: uri.Scheme,
Host: uri.Host, Host: uri.Host,
} }
@ -77,7 +58,7 @@ func NewBlobURLParts(u string) (BlobURLParts, error) {
if path[0] == '/' { if path[0] == '/' {
path = path[1:] // If path starts with a slash, remove it path = path[1:] // If path starts with a slash, remove it
} }
if isIPEndpointStyle(up.Host) { if shared.IsIPEndpointStyle(up.Host) {
if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob
up.IPEndpointStyleInfo.AccountName = path up.IPEndpointStyleInfo.AccountName = path
path = "" // No ContainerName present in the URL so path should be empty path = "" // No ContainerName present in the URL so path should be empty
@ -114,27 +95,16 @@ func NewBlobURLParts(u string) (BlobURLParts, error) {
delete(paramsMap, "versionId") // delete "versionId" from paramsMap delete(paramsMap, "versionId") // delete "versionId" from paramsMap
} }
up.SAS = newSASQueryParameters(paramsMap, true) up.SAS = NewQueryParameters(paramsMap, true)
up.UnparsedParams = paramsMap.Encode() up.UnparsedParams = paramsMap.Encode()
return up, nil return up, nil
} }
type caseInsensitiveValues url.Values // map[string][]string // String returns a URL object whose fields are initialized from the URLParts fields. The URL's RawQuery
func (values caseInsensitiveValues) Get(key string) ([]string, bool) {
key = strings.ToLower(key)
for k, v := range values {
if strings.ToLower(k) == key {
return v, true
}
}
return []string{}, false
}
// URL returns a URL object whose fields are initialized from the BlobURLParts fields. The URL's RawQuery
// field contains the SAS, snapshot, and unparsed query parameters. // field contains the SAS, snapshot, and unparsed query parameters.
func (up BlobURLParts) URL() string { func (up URLParts) String() string {
path := "" path := ""
if isIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" { if shared.IsIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" {
path += "/" + up.IPEndpointStyleInfo.AccountName path += "/" + up.IPEndpointStyleInfo.AccountName
} }
// Concatenate container & blob names (if they exist) // Concatenate container & blob names (if they exist)
@ -148,8 +118,8 @@ func (up BlobURLParts) URL() string {
rawQuery := up.UnparsedParams rawQuery := up.UnparsedParams
//If no snapshot is initially provided, fill it in from the SAS query properties to help the user //If no snapshot is initially provided, fill it in from the SAS query properties to help the user
if up.Snapshot == "" && !up.SAS.snapshotTime.IsZero() { if up.Snapshot == "" && !up.SAS.SnapshotTime().IsZero() {
up.Snapshot = up.SAS.snapshotTime.Format(SnapshotTimeFormat) up.Snapshot = up.SAS.SnapshotTime().Format(exported.SnapshotTimeFormat)
} }
// Concatenate blob version id query parameter (if it exists) // Concatenate blob version id query parameter (if it exists)
@ -182,3 +152,15 @@ func (up BlobURLParts) URL() string {
} }
return u.String() return u.String()
} }
type caseInsensitiveValues url.Values // map[string][]string
func (values caseInsensitiveValues) Get(key string) ([]string, bool) {
key = strings.ToLower(key)
for k, v := range values {
if strings.ToLower(k) == key {
return v, true
}
}
return []string{}, false
}

View file

@ -1,53 +0,0 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"errors"
"io"
)
type sectionWriter struct {
count int64
offset int64
position int64
writerAt io.WriterAt
}
func newSectionWriter(c io.WriterAt, off int64, count int64) *sectionWriter {
return &sectionWriter{
count: count,
offset: off,
writerAt: c,
}
}
func (c *sectionWriter) Write(p []byte) (int, error) {
remaining := c.count - c.position
if remaining <= 0 {
return 0, errors.New("end of section reached")
}
slice := p
if int64(len(slice)) > remaining {
slice = slice[:remaining]
}
n, err := c.writerAt.WriteAt(slice, c.offset+c.position)
c.position += int64(n)
if err != nil {
return n, err
}
if len(p) > n {
return n, errors.New("not enough space for all bytes")
}
return n, nil
}

View file

@ -0,0 +1,279 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package service
import (
"context"
"errors"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"net/http"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)
// ClientOptions contains the optional parameters when creating a Client.
type ClientOptions struct {
	// Embedded core client options shared across azcore-based clients.
	azcore.ClientOptions
}
// Client represents a URL to the Azure Blob Storage service allowing you to manipulate blob containers.
// It is a type-converted view over the generated service client (see base.Client).
type Client base.Client[generated.ServiceClient]
// NewClient creates a Client object using the specified URL, Azure AD credential, and options.
// Example of serviceURL: https://<your_storage_account>.blob.core.windows.net
func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
	opts := shared.GetClientOptions(options)
	// Authenticate every (re)try of a request with a bearer token for the storage scope.
	opts.PerRetryPolicies = append(
		opts.PerRetryPolicies,
		runtime.NewBearerTokenPolicy(cred, []string{shared.TokenScope}, nil),
	)
	pipeline := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &opts.ClientOptions)
	return (*Client)(base.NewServiceClient(serviceURL, pipeline, nil)), nil
}
// NewClientWithNoCredential creates a Client object using the specified URL and options.
// Example of serviceURL: https://<your_storage_account>.blob.core.windows.net?<SAS token>
func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) {
	// No authentication policy: the URL itself is expected to carry a SAS token
	// or point at a publicly accessible resource.
	opts := shared.GetClientOptions(options)
	pipeline := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &opts.ClientOptions)
	return (*Client)(base.NewServiceClient(serviceURL, pipeline, nil)), nil
}
// NewClientWithSharedKeyCredential creates a Client object using the specified URL, shared key, and options.
// Example of serviceURL: https://<your_storage_account>.blob.core.windows.net
func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) {
	opts := shared.GetClientOptions(options)
	// Sign every (re)try of a request with the account's shared key.
	opts.PerRetryPolicies = append(opts.PerRetryPolicies, exported.NewSharedKeyCredPolicy(cred))
	pipeline := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &opts.ClientOptions)
	// The credential is retained on the base client so derived clients can sign SAS tokens.
	return (*Client)(base.NewServiceClient(serviceURL, pipeline, cred)), nil
}
// NewClientFromConnectionString creates a service client from the given connection string.
// nolint
func NewClientFromConnectionString(connectionString string, options *ClientOptions) (*Client, error) {
	parsed, err := shared.ParseConnectionString(connectionString)
	if err != nil {
		return nil, err
	}
	if parsed.AccountName == "" || parsed.AccountKey == "" {
		// No shared-key material in the connection string: fall back to an
		// unauthenticated client (the service URL may carry a SAS token).
		return NewClientWithNoCredential(parsed.ServiceURL, options)
	}
	credential, err := exported.NewSharedKeyCredential(parsed.AccountName, parsed.AccountKey)
	if err != nil {
		return nil, err
	}
	return NewClientWithSharedKeyCredential(parsed.ServiceURL, credential, options)
}
// GetUserDelegationCredential obtains a UserDelegationKey object using the base ServiceURL object.
// OAuth is required for this call, as well as any role that can delegate access to the storage account.
func (s *Client) GetUserDelegationCredential(ctx context.Context, info KeyInfo, o *GetUserDelegationCredentialOptions) (*UserDelegationCredential, error) {
	// Parse the endpoint up front so we fail before issuing the RPC on a bad URL.
	parts, err := blob.ParseURL(s.URL())
	if err != nil {
		return nil, err
	}
	udk, err := s.generated().GetUserDelegationKey(ctx, info, o.format())
	if err != nil {
		return nil, err
	}
	// The account name is the first label of the host, e.g. "acct" in acct.blob.core.windows.net.
	accountName := strings.Split(parts.Host, ".")[0]
	return exported.NewUserDelegationCredential(accountName, udk.UserDelegationKey), nil
}
// generated returns the underlying generated ServiceClient backing this Client.
func (s *Client) generated() *generated.ServiceClient {
	return base.InnerClient((*base.Client[generated.ServiceClient])(s))
}

// sharedKey returns the SharedKeyCredential stored on the underlying base client
// (nil when the client was created without one, e.g. via NewClient or
// NewClientWithNoCredential).
func (s *Client) sharedKey() *SharedKeyCredential {
	return base.SharedKey((*base.Client[generated.ServiceClient])(s))
}

// URL returns the URL endpoint used by the Client object.
func (s *Client) URL() string {
	return s.generated().Endpoint()
}
// NewContainerClient creates a new container.Client object by concatenating containerName
// to the end of this Client's URL. The new container.Client reuses this Client's request
// pipeline and shared key credential (if any).
func (s *Client) NewContainerClient(containerName string) *container.Client {
	containerURL := runtime.JoinPaths(s.generated().Endpoint(), containerName)
	return (*container.Client)(base.NewContainerClient(containerURL, s.generated().Pipeline(), s.sharedKey()))
}
// CreateContainer is a lifecycle method that creates a new container under the specified account.
// If a container with the same name already exists, a ResourceExistsError will be raised.
// This method returns a client with which to interact with the newly created container.
func (s *Client) CreateContainer(ctx context.Context, containerName string, options *CreateContainerOptions) (CreateContainerResponse, error) {
	return s.NewContainerClient(containerName).Create(ctx, options)
}
// DeleteContainer is a lifecycle method that marks the specified container for deletion.
// The container and any blobs contained within it are later deleted during garbage collection.
// If the container is not found, a ResourceNotFoundError will be raised.
func (s *Client) DeleteContainer(ctx context.Context, containerName string, options *DeleteContainerOptions) (DeleteContainerResponse, error) {
	return s.NewContainerClient(containerName).Delete(ctx, options)
}
// RestoreContainer restores a soft-deleted container.
// The operation will only be successful if used within the specified number of days set in the delete retention policy.
func (s *Client) RestoreContainer(ctx context.Context, deletedContainerName string, deletedContainerVersion string, options *RestoreContainerOptions) (RestoreContainerResponse, error) {
	return s.NewContainerClient(deletedContainerName).Restore(ctx, deletedContainerVersion, options)
}
// GetAccountInfo provides account-level information.
func (s *Client) GetAccountInfo(ctx context.Context, o *GetAccountInfoOptions) (GetAccountInfoResponse, error) {
	return s.generated().GetAccountInfo(ctx, o.format())
}
// NewListContainersPager operation returns a pager of the containers under the specified account.
// Use an empty Marker to start enumeration from the beginning. Container names are returned in lexicographic order.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-containers2.
func (s *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager[ListContainersResponse] {
	// Translate the public options into the generated layer's segment options.
	listOptions := generated.ServiceClientListContainersSegmentOptions{}
	if o != nil {
		if o.Include.Deleted {
			listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeDeleted)
		}
		if o.Include.Metadata {
			listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeMetadata)
		}
		listOptions.Marker = o.Marker
		listOptions.Maxresults = o.MaxResults
		listOptions.Prefix = o.Prefix
	}
	return runtime.NewPager(runtime.PagingHandler[ListContainersResponse]{
		// More reports whether the service returned a non-empty continuation marker.
		More: func(page ListContainersResponse) bool {
			return page.NextMarker != nil && len(*page.NextMarker) > 0
		},
		// Fetcher issues one List Containers segment request; page is nil for the first call.
		Fetcher: func(ctx context.Context, page *ListContainersResponse) (ListContainersResponse, error) {
			if page != nil {
				// Continue from the previous page's marker.
				listOptions.Marker = page.Marker
			}
			req, err := s.generated().ListContainersSegmentCreateRequest(ctx, &listOptions)
			if err != nil {
				return ListContainersResponse{}, err
			}
			resp, err := s.generated().Pipeline().Do(req)
			if err != nil {
				return ListContainersResponse{}, err
			}
			if !runtime.HasStatusCode(resp, http.StatusOK) {
				return ListContainersResponse{}, runtime.NewResponseError(resp)
			}
			return s.generated().ListContainersSegmentHandleResponse(resp)
		},
	})
}
// GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics
// and CORS (Cross-Origin Resource Sharing) rules.
func (s *Client) GetProperties(ctx context.Context, o *GetPropertiesOptions) (GetPropertiesResponse, error) {
	return s.generated().GetProperties(ctx, o.format())
}
// SetProperties sets the properties of a storage account's Blob service, including Azure Storage Analytics.
// If an element (e.g. analytics_logging) is left as None, the existing settings on the service for that functionality are preserved.
func (s *Client) SetProperties(ctx context.Context, o *SetPropertiesOptions) (SetPropertiesResponse, error) {
	props, opts := o.format()
	return s.generated().SetProperties(ctx, props, opts)
}
// GetStatistics retrieves statistics related to replication for the Blob service.
// It is only available when read-access geo-redundant replication is enabled for the storage account.
// With geo-redundant replication, Azure Storage maintains your data durable
// in two locations. In both locations, Azure Storage constantly maintains
// multiple healthy replicas of your data. The location where you read,
// create, update, or delete data is the primary storage account location.
// The primary location exists in the region you choose at the time you
// create an account via the Azure Management Azure classic portal, for
// example, North Central US. The location to which your data is replicated
// is the secondary location. The secondary location is automatically
// determined based on the location of the primary; it is in a second data
// center that resides in the same region as the primary location. Read-only
// access is available from the secondary location, if read-access geo-redundant
// replication is enabled for your storage account.
func (s *Client) GetStatistics(ctx context.Context, o *GetStatisticsOptions) (GetStatisticsResponse, error) {
	return s.generated().GetStatistics(ctx, o.format())
}
// GetSASURL is a convenience method for generating a SAS token for the currently pointed at account.
// It can only be used if the credential supplied during creation was a SharedKeyCredential.
// This validity can be checked with CanGetAccountSASToken().
func (s *Client) GetSASURL(resources sas.AccountResourceTypes, permissions sas.AccountPermissions, services sas.AccountServices, start time.Time, expiry time.Time) (string, error) {
	sharedKey := s.sharedKey()
	if sharedKey == nil {
		return "", errors.New("SAS can only be signed with a SharedKeyCredential")
	}

	values := sas.AccountSignatureValues{
		Version:       sas.Version,
		Protocol:      sas.ProtocolHTTPS,
		Permissions:   permissions.String(),
		Services:      services.String(),
		ResourceTypes: resources.String(),
		StartTime:     start.UTC(),
		ExpiryTime:    expiry.UTC(),
	}
	qps, err := values.SignWithSharedKey(sharedKey)
	if err != nil {
		return "", err
	}

	// Append the encoded SAS query parameters to the account endpoint.
	endpoint := s.URL()
	if !strings.HasSuffix(endpoint, "/") {
		endpoint += "/"
	}
	return endpoint + "?" + qps.Encode(), nil
}
// FilterBlobs operation finds all blobs in the storage account whose tags match a given search expression.
// Filter blobs searches across all containers within a storage account but can be scoped within the expression to a single container.
// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags
// eg. "dog='germanshepherd' and penguin='emperorpenguin'"
// To specify a container, eg. "@container=containerName and Name = C"
func (s *Client) FilterBlobs(ctx context.Context, o *FilterBlobsOptions) (FilterBlobsResponse, error) {
	return s.generated().FilterBlobs(ctx, o.format())
}

View file

@ -0,0 +1,92 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package service
import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
const (
	// ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container.
	ContainerNameRoot = "$root"

	// ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container.
	ContainerNameLogs = "$logs"
)

// SKUName defines values for SkuName - LRS, GRS, RAGRS, ZRS, Premium LRS
type SKUName = generated.SKUName

// Re-exported SKUName values from the generated layer.
const (
	SKUNameStandardLRS   SKUName = generated.SKUNameStandardLRS
	SKUNameStandardGRS   SKUName = generated.SKUNameStandardGRS
	SKUNameStandardRAGRS SKUName = generated.SKUNameStandardRAGRS
	SKUNameStandardZRS   SKUName = generated.SKUNameStandardZRS
	SKUNamePremiumLRS    SKUName = generated.SKUNamePremiumLRS
)

// PossibleSKUNameValues returns the possible values for the SKUName const type.
func PossibleSKUNameValues() []SKUName {
	return generated.PossibleSKUNameValues()
}

// ListContainersIncludeType defines values for ListContainersIncludeType
type ListContainersIncludeType = generated.ListContainersIncludeType

// Re-exported ListContainersIncludeType values from the generated layer.
const (
	ListContainersIncludeTypeMetadata ListContainersIncludeType = generated.ListContainersIncludeTypeMetadata
	ListContainersIncludeTypeDeleted  ListContainersIncludeType = generated.ListContainersIncludeTypeDeleted
	ListContainersIncludeTypeSystem   ListContainersIncludeType = generated.ListContainersIncludeTypeSystem
)

// PossibleListContainersIncludeTypeValues returns the possible values for the ListContainersIncludeType const type.
func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType {
	return generated.PossibleListContainersIncludeTypeValues()
}

// AccountKind defines values for AccountKind
type AccountKind = generated.AccountKind

// Re-exported AccountKind values from the generated layer.
const (
	AccountKindStorage          AccountKind = generated.AccountKindStorage
	AccountKindBlobStorage      AccountKind = generated.AccountKindBlobStorage
	AccountKindStorageV2        AccountKind = generated.AccountKindStorageV2
	AccountKindFileStorage      AccountKind = generated.AccountKindFileStorage
	AccountKindBlockBlobStorage AccountKind = generated.AccountKindBlockBlobStorage
)

// PossibleAccountKindValues returns the possible values for the AccountKind const type.
func PossibleAccountKindValues() []AccountKind {
	return generated.PossibleAccountKindValues()
}

// BlobGeoReplicationStatus - The status of the secondary location
type BlobGeoReplicationStatus = generated.BlobGeoReplicationStatus

// Re-exported BlobGeoReplicationStatus values from the generated layer.
const (
	BlobGeoReplicationStatusLive        BlobGeoReplicationStatus = generated.BlobGeoReplicationStatusLive
	BlobGeoReplicationStatusBootstrap   BlobGeoReplicationStatus = generated.BlobGeoReplicationStatusBootstrap
	BlobGeoReplicationStatusUnavailable BlobGeoReplicationStatus = generated.BlobGeoReplicationStatusUnavailable
)

// PossibleBlobGeoReplicationStatusValues returns the possible values for the BlobGeoReplicationStatus const type.
func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus {
	return generated.PossibleBlobGeoReplicationStatusValues()
}

// PublicAccessType defines values for AccessType - private (default) or blob or container
type PublicAccessType = generated.PublicAccessType

// Re-exported PublicAccessType values from the generated layer.
const (
	PublicAccessTypeBlob      PublicAccessType = generated.PublicAccessTypeBlob
	PublicAccessTypeContainer PublicAccessType = generated.PublicAccessTypeContainer
)

// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type.
func PossiblePublicAccessTypeValues() []PublicAccessType {
	return generated.PossiblePublicAccessTypeValues()
}

View file

@ -0,0 +1,220 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package service
import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
// SharedKeyCredential contains an account's name and its primary or secondary key.
type SharedKeyCredential = exported.SharedKeyCredential

// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the
// storage account's name and either its primary or secondary key.
// It delegates directly to the internal exported implementation.
func NewSharedKeyCredential(accountName, accountKey string) (*SharedKeyCredential, error) {
	return exported.NewSharedKeyCredential(accountName, accountKey)
}
// UserDelegationCredential contains an account's name and its user delegation key.
type UserDelegationCredential = exported.UserDelegationCredential

// UserDelegationKey contains UserDelegationKey.
type UserDelegationKey = generated.UserDelegationKey

// KeyInfo contains KeyInfo struct.
type KeyInfo = generated.KeyInfo

// GetUserDelegationCredentialOptions contains optional parameters for Service.GetUserDelegationKey method
type GetUserDelegationCredentialOptions struct {
	// placeholder for future options
}

// format converts the options into the generated layer's representation.
// There are currently no options, so it always returns nil.
func (o *GetUserDelegationCredentialOptions) format() *generated.ServiceClientGetUserDelegationKeyOptions {
	return nil
}
// AccessConditions identifies container-specific access conditions which you optionally set.
type AccessConditions = exported.ContainerAccessConditions

// CpkInfo contains a group of parameters for the BlobClient.Download method.
type CpkInfo = generated.CpkInfo

// CpkScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
type CpkScopeInfo = generated.CpkScopeInfo

// CreateContainerOptions contains the optional parameters for the container.Client.Create method.
type CreateContainerOptions = container.CreateOptions

// DeleteContainerOptions contains the optional parameters for the container.Client.Delete method.
type DeleteContainerOptions = container.DeleteOptions

// RestoreContainerOptions contains the optional parameters for the container.Client.Restore method.
type RestoreContainerOptions = container.RestoreOptions

// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another
// domain. Web browsers implement a security restriction known as same-origin policy that
// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin
// domain) to call APIs in another domain
type CorsRule = generated.CorsRule

// RetentionPolicy - the retention policy which determines how long the associated data should persist
type RetentionPolicy = generated.RetentionPolicy

// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs
type Metrics = generated.Metrics

// Logging - Azure Analytics Logging settings.
type Logging = generated.Logging

// StaticWebsite - The properties that enable an account to host a static website
type StaticWebsite = generated.StaticWebsite

// StorageServiceProperties - Storage Service Properties.
type StorageServiceProperties = generated.StorageServiceProperties

// StorageServiceStats - Stats for the storage service.
type StorageServiceStats = generated.StorageServiceStats
// ---------------------------------------------------------------------------------------------------------------------

// GetAccountInfoOptions provides set of options for Client.GetAccountInfo
type GetAccountInfoOptions struct {
	// placeholder for future options
}

// format converts the options into the generated layer's representation.
// There are currently no options, so it always returns nil.
func (o *GetAccountInfoOptions) format() *generated.ServiceClientGetAccountInfoOptions {
	return nil
}

// ---------------------------------------------------------------------------------------------------------------------

// GetPropertiesOptions contains the optional parameters for the Client.GetProperties method.
type GetPropertiesOptions struct {
	// placeholder for future options
}

// format converts the options into the generated layer's representation.
// There are currently no options, so it always returns nil.
func (o *GetPropertiesOptions) format() *generated.ServiceClientGetPropertiesOptions {
	return nil
}
// ---------------------------------------------------------------------------------------------------------------------

// ListContainersOptions provides set of configurations for ListContainers operation
type ListContainersOptions struct {
	// Include tells the service which additional data (metadata, soft-deleted containers) to return per container.
	Include ListContainersInclude

	// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
	// operation returns the NextMarker value within the response body if the listing operation did not return all containers
	// remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in
	// a subsequent call to request the next page of list items. The marker value is opaque to the client.
	Marker *string

	// Specifies the maximum number of containers to return. If the request does not specify max results, or specifies a value
	// greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary,
	// then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible
	// that the service will return fewer results than specified by max results, or than the default of 5000.
	MaxResults *int32

	// Filters the results to return only containers whose name begins with the specified prefix.
	Prefix *string
}

// ListContainersInclude indicates what additional information the service should return with each container.
type ListContainersInclude struct {
	// Tells the service whether to return metadata for each container.
	Metadata bool

	// Tells the service whether to return soft-deleted containers.
	Deleted bool
}
// ---------------------------------------------------------------------------------------------------------------------

// SetPropertiesOptions provides set of options for Client.SetProperties
type SetPropertiesOptions struct {
	// The set of CORS rules.
	Cors []*CorsRule

	// The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible
	// values include version 2008-10-27 and all more recent versions
	DefaultServiceVersion *string

	// the retention policy which determines how long the associated data should persist
	DeleteRetentionPolicy *RetentionPolicy

	// a summary of request statistics grouped by API in hour or minute aggregates for blobs
	HourMetrics *Metrics

	// Azure Analytics Logging settings.
	Logging *Logging

	// a summary of request statistics grouped by API in hour or minute aggregates for blobs
	MinuteMetrics *Metrics

	// The properties that enable an account to host a static website
	StaticWebsite *StaticWebsite
}

// format builds the generated-layer StorageServiceProperties payload from the options.
// A nil receiver yields zero-value properties (no property changes are carried).
// The second return value (per-call options) is currently always nil.
func (o *SetPropertiesOptions) format() (generated.StorageServiceProperties, *generated.ServiceClientSetPropertiesOptions) {
	if o == nil {
		return generated.StorageServiceProperties{}, nil
	}

	return generated.StorageServiceProperties{
		Cors:                  o.Cors,
		DefaultServiceVersion: o.DefaultServiceVersion,
		DeleteRetentionPolicy: o.DeleteRetentionPolicy,
		HourMetrics:           o.HourMetrics,
		Logging:               o.Logging,
		MinuteMetrics:         o.MinuteMetrics,
		StaticWebsite:         o.StaticWebsite,
	}, nil
}
// ---------------------------------------------------------------------------------------------------------------------

// GetStatisticsOptions provides set of options for Client.GetStatistics
type GetStatisticsOptions struct {
	// placeholder for future options
}

// format converts the options into the generated layer's representation.
// There are currently no options, so it always returns nil.
func (o *GetStatisticsOptions) format() *generated.ServiceClientGetStatisticsOptions {
	return nil
}
// ---------------------------------------------------------------------------------------------------------------------

// FilterBlobsOptions provides set of options for Client.FindBlobsByTags
type FilterBlobsOptions struct {
	// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
	// operation returns the NextMarker value within the response body if the listing
	// operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
	// as the value for the marker parameter in a subsequent call to request the next
	// page of list items. The marker value is opaque to the client.
	Marker *string

	// Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
	// greater than 5000, the server will return up to 5000 items. Note that if the
	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
	// of the results. For this reason, it is possible that the service will
	// return fewer results than specified by maxresults, or than the default of 5000.
	MaxResults *int32

	// Filters the results to return only blobs whose tags match the specified expression.
	Where *string
}

// format converts the options into the generated layer's representation.
// A nil receiver yields nil, meaning no filter parameters are sent.
func (o *FilterBlobsOptions) format() *generated.ServiceClientFilterBlobsOptions {
	if o == nil {
		return nil
	}

	return &generated.ServiceClientFilterBlobsOptions{
		Marker:     o.Marker,
		Maxresults: o.MaxResults,
		Where:      o.Where,
	}
}

View file

@ -0,0 +1,41 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
package service
import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
// CreateContainerResponse contains the response from method container.Client.Create.
type CreateContainerResponse = generated.ContainerClientCreateResponse

// DeleteContainerResponse contains the response from method container.Client.Delete
type DeleteContainerResponse = generated.ContainerClientDeleteResponse

// RestoreContainerResponse contains the response from method container.Client.Restore
type RestoreContainerResponse = generated.ContainerClientRestoreResponse

// GetAccountInfoResponse contains the response from method Client.GetAccountInfo.
type GetAccountInfoResponse = generated.ServiceClientGetAccountInfoResponse

// ListContainersResponse contains the response from method Client.ListContainersSegment.
type ListContainersResponse = generated.ServiceClientListContainersSegmentResponse

// GetPropertiesResponse contains the response from method Client.GetProperties.
type GetPropertiesResponse = generated.ServiceClientGetPropertiesResponse

// SetPropertiesResponse contains the response from method Client.SetProperties.
type SetPropertiesResponse = generated.ServiceClientSetPropertiesResponse

// GetStatisticsResponse contains the response from method Client.GetStatistics.
type GetStatisticsResponse = generated.ServiceClientGetStatisticsResponse

// FilterBlobsResponse contains the response from method Client.FilterBlobs.
type FilterBlobsResponse = generated.ServiceClientFilterBlobsResponse

// GetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey.
type GetUserDelegationKeyResponse = generated.ServiceClientGetUserDelegationKeyResponse

View file

@ -0,0 +1,513 @@
{
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"baseName": {
"type": "String"
},
"tenantId": {
"type": "string",
"defaultValue": "72f988bf-86f1-41af-91ab-2d7cd011db47",
"metadata": {
"description": "The tenant ID to which the application and resources belong."
}
},
"testApplicationOid": {
"type": "string",
"metadata": {
"description": "The principal to assign the role to. This is application object id."
}
}
},
"variables": {
"mgmtApiVersion": "2019-06-01",
"authorizationApiVersion": "2018-09-01-preview",
"blobDataContributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/ba92f5b4-2d11-453d-a403-e96b0029c9fe')]",
"contributorRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b24988ac-6180-42a0-ab88-20f7382dd24c')]",
"blobDataOwnerRoleId": "[concat('/subscriptions/', subscription().subscriptionId, '/providers/Microsoft.Authorization/roleDefinitions/b7e6dc6d-f1e8-4753-8033-0f276bb0955b')]",
"primaryAccountName": "[concat(parameters('baseName'), 'prim')]",
"primaryEncryptionScopeName": "encryptionScope",
"primaryEncryptionScope": "[concat(parameters('baseName'), 'prim', concat('/', variables('primaryEncryptionScopeName')))]",
"secondaryAccountName": "[concat(parameters('baseName'), 'sec')]",
"premiumAccountName": "[concat(parameters('baseName'), 'prem')]",
"dataLakeAccountName": "[concat(parameters('baseName'), 'dtlk')]",
"softDeleteAccountName": "[concat(parameters('baseName'), 'sftdl')]",
"premiumFileAccountName": "[concat(parameters('baseName'), 'pfile')]",
"webjobsPrimaryAccountName": "[concat(parameters('baseName'), 'wjprim')]",
"webjobsSecondaryAccountName": "[concat(parameters('baseName'), 'wjsec')]",
"location": "[resourceGroup().location]",
"resourceGroupName": "[resourceGroup().name]",
"subscriptionId": "[subscription().subscriptionId]",
"encryption": {
"services": {
"file": {
"enabled": true
},
"blob": {
"enabled": true
}
},
"keySource": "Microsoft.Storage"
},
"networkAcls": {
"bypass": "AzureServices",
"virtualNetworkRules": [],
"ipRules": [],
"defaultAction": "Allow"
}
},
"resources": [
{
"type": "Microsoft.Authorization/roleAssignments",
"apiVersion": "[variables('authorizationApiVersion')]",
"name": "[guid(concat('dataContributorRoleId', resourceGroup().id))]",
"properties": {
"roleDefinitionId": "[variables('blobDataContributorRoleId')]",
"principalId": "[parameters('testApplicationOid')]"
}
},
{
"type": "Microsoft.Authorization/roleAssignments",
"apiVersion": "[variables('authorizationApiVersion')]",
"name": "[guid(concat('contributorRoleId', resourceGroup().id))]",
"properties": {
"roleDefinitionId": "[variables('contributorRoleId')]",
"principalId": "[parameters('testApplicationOid')]"
}
},
{
"type": "Microsoft.Authorization/roleAssignments",
"apiVersion": "[variables('authorizationApiVersion')]",
"name": "[guid(concat('blobDataOwnerRoleId', resourceGroup().id))]",
"properties": {
"roleDefinitionId": "[variables('blobDataOwnerRoleId')]",
"principalId": "[parameters('testApplicationOid')]"
}
},
{
"type": "Microsoft.Storage/storageAccounts",
"apiVersion": "[variables('mgmtApiVersion')]",
"name": "[variables('primaryAccountName')]",
"location": "[variables('location')]",
"sku": {
"name": "Standard_RAGRS",
"tier": "Standard"
},
"kind": "StorageV2",
"properties": {
"networkAcls": "[variables('networkAcls')]",
"supportsHttpsTrafficOnly": true,
"encryption": "[variables('encryption')]",
"accessTier": "Hot"
}
},
{
"type": "Microsoft.Storage/storageAccounts/blobServices",
"apiVersion": "[variables('mgmtApiVersion')]",
"name": "[concat(variables('primaryAccountName'), '/default')]",
"properties": {
"isVersioningEnabled": true,
"lastAccessTimeTrackingPolicy": {
"enable": true,
"name": "AccessTimeTracking",
"trackingGranularityInDays": 1,
"blobType": [
"blockBlob"
]
}
},
"dependsOn": [
"[variables('primaryAccountName')]"
]
},
{
"type": "Microsoft.Storage/storageAccounts/encryptionScopes",
"apiVersion": "[variables('mgmtApiVersion')]",
"name": "[variables('primaryEncryptionScope')]",
"properties": {
"source": "Microsoft.Storage",
"state": "Enabled"
},
"dependsOn": [
"[variables('primaryAccountName')]"
]
},
{
"type": "Microsoft.Storage/storageAccounts",
"apiVersion": "[variables('mgmtApiVersion')]",
"name": "[variables('secondaryAccountName')]",
"location": "[variables('location')]",
"sku": {
"name": "Standard_RAGRS",
"tier": "Standard"
},
"kind": "StorageV2",
"properties": {
"networkAcls": "[variables('networkAcls')]",
"supportsHttpsTrafficOnly": true,
"encryption": "[variables('encryption')]",
"accessTier": "Hot"
}
},
{
"type": "Microsoft.Storage/storageAccounts",
"apiVersion": "[variables('mgmtApiVersion')]",
"name": "[variables('premiumAccountName')]",
"location": "[variables('location')]",
"sku": {
"name": "Premium_LRS",
"tier": "Premium"
},
"kind": "StorageV2",
"properties": {
"networkAcls": "[variables('networkAcls')]",
"supportsHttpsTrafficOnly": true,
"encryption": "[variables('encryption')]",
"accessTier": "Hot"
}
},
{
"type": "Microsoft.Storage/storageAccounts",
"apiVersion": "[variables('mgmtApiVersion')]",
"name": "[variables('dataLakeAccountName')]",
"location": "[variables('location')]",
"sku": {
"name": "Standard_RAGRS",
"tier": "Standard"
},
"kind": "StorageV2",
"properties": {
"isHnsEnabled": true,
"networkAcls": "[variables('networkAcls')]",
"supportsHttpsTrafficOnly": true,
"encryption": "[variables('encryption')]",
"accessTier": "Hot"
}
},
{
"type": "Microsoft.Storage/storageAccounts/blobServices",
"apiVersion": "[variables('mgmtApiVersion')]",
"name": "[concat(variables('dataLakeAccountName'), '/default')]",
"properties": {
"containerDeleteRetentionPolicy": {
"enabled": true,
"days": 1
}
},
"dependsOn": [
"[variables('dataLakeAccountName')]"
]
},
{
"type": "Microsoft.Storage/storageAccounts",
"apiVersion": "[variables('mgmtApiVersion')]",
"name": "[variables('softDeleteAccountName')]",
"location": "[variables('location')]",
"sku": {
"name": "Standard_RAGRS",
"tier": "Standard"
},
"kind": "StorageV2",
"properties": {
"networkAcls": "[variables('networkAcls')]",
"supportsHttpsTrafficOnly": true,
"encryption": "[variables('encryption')]",
"accessTier": "Hot"
}
},
{
"type": "Microsoft.Storage/storageAccounts/blobServices",
"apiVersion": "[variables('mgmtApiVersion')]",
"name": "[concat(variables('softDeleteAccountName'), '/default')]",
"properties": {
"deleteRetentionPolicy": {
"enabled": true,
"days": 1
},
"containerDeleteRetentionPolicy": {
"enabled": true,
"days": 1
}
},
"dependsOn": [
"[variables('softDeleteAccountName')]"
]
},
{
"type": "Microsoft.Storage/storageAccounts/fileServices",
"apiVersion": "[variables('mgmtApiVersion')]",
"name": "[concat(variables('softDeleteAccountName'), '/default')]",
"properties": {
"shareDeleteRetentionPolicy": {
"enabled": true,
"days": 1
}
},
"dependsOn": [
"[variables('softDeleteAccountName')]"
]
},
{
"type": "Microsoft.Storage/storageAccounts",
"apiVersion": "[variables('mgmtApiVersion')]",
"name": "[variables('premiumFileAccountName')]",
"location": "[variables('location')]",
"sku": {
"name": "Premium_LRS",
"tier": "Premium"
},
"kind": "FileStorage",
"properties": {
"networkAcls": "[variables('networkAcls')]",
"supportsHttpsTrafficOnly": true,
"encryption": "[variables('encryption')]",
"accessTier": "Hot"
}
},
{
"type": "Microsoft.Storage/storageAccounts",
"apiVersion": "[variables('mgmtApiVersion')]",
"name": "[variables('webjobsPrimaryAccountName')]",
"location": "[variables('location')]",
"sku": {
"name": "Standard_RAGRS",
"tier": "Standard"
},
"kind": "StorageV2",
"properties": {
"networkAcls": "[variables('networkAcls')]",
"supportsHttpsTrafficOnly": true,
"encryption": "[variables('encryption')]",
"accessTier": "Hot"
}
},
{
"type": "Microsoft.Storage/storageAccounts",
"apiVersion": "[variables('mgmtApiVersion')]",
"name": "[variables('webjobsSecondaryAccountName')]",
"location": "[variables('location')]",
"sku": {
"name": "Standard_RAGRS",
"tier": "Standard"
},
"kind": "StorageV2",
"properties": {
"networkAcls": "[variables('networkAcls')]",
"supportsHttpsTrafficOnly": true,
"encryption": "[variables('encryption')]",
"accessTier": "Hot"
}
}
],
"functions": [
{
"namespace": "url",
"members": {
"serviceEndpointSuffix": {
"parameters": [
{
"name": "endpoint",
"type": "string"
}
],
"output": {
"type": "string",
"value": "[substring(parameters('endpoint'), add(indexOf(parameters('endpoint'), '.'),1), sub(length(parameters('endpoint')), add(indexOf(parameters('endpoint'), '.'),2)))]"
}
}
}
},
{
"namespace": "connectionString",
"members": {
"create": {
"parameters": [
{
"name": "accountName",
"type": "string"
},
{
"name": "accountKey",
"type": "string"
},
{
"name": "blobEndpoint",
"type": "string"
},
{
"name": "queueEndpoint",
"type": "string"
},
{
"name": "fileEndpoint",
"type": "string"
},
{
"name": "tableEndpoint",
"type": "string"
}
],
"output": {
"type": "string",
"value": "[concat('DefaultEndpointsProtocol=https;AccountName=', parameters('accountName'), ';AccountKey=', parameters('accountKey'), ';BlobEndpoint=', parameters('blobEndpoint'), ';QueueEndpoint=', parameters('queueEndpoint'), ';FileEndpoint=', parameters('fileEndpoint'), ';TableEndpoint=', parameters('tableEndpoint'))]"
}
}
}
}
],
"outputs": {
"AZURE_STORAGE_ACCOUNT_NAME": {
"type": "string",
"value": "[variables('primaryAccountName')]"
},
"AZURE_STORAGE_ACCOUNT_KEY": {
"type": "string",
"value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).keys[0].value]"
},
"PRIMARY_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": {
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]"
},
"PRIMARY_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": {
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]"
},
"PRIMARY_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": {
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]"
},
"PRIMARY_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": {
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('primaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]"
},
"SECONDARY_AZURE_STORAGE_ACCOUNT_NAME": {
"type": "string",
"value": "[variables('secondaryAccountName')]"
},
"SECONDARY_AZURE_STORAGE_ACCOUNT_KEY": {
"type": "string",
"value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).keys[0].value]"
},
"SECONDARY_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": {
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]"
},
"SECONDARY_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": {
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]"
},
"SECONDARY_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": {
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]"
},
"SECONDARY_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": {
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]"
},
"BLOB_STORAGE_ACCOUNT_NAME": {
"type": "string",
"value": "[variables('secondaryAccountName')]"
},
"BLOB_STORAGE_ACCOUNT_KEY": {
"type": "string",
"value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('secondaryAccountName')), variables('mgmtApiVersion')).keys[0].value]"
},
"PREMIUM_AZURE_STORAGE_ACCOUNT_NAME": {
"type": "string",
"value": "[variables('premiumAccountName')]"
},
"PREMIUM_AZURE_STORAGE_ACCOUNT_KEY": {
"type": "string",
"value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('premiumAccountName')), variables('mgmtApiVersion')).keys[0].value]"
},
"PREMIUM_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": {
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('premiumAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]"
},
"DATALAKE_STORAGE_ACCOUNT_NAME": {
"type": "string",
"value": "[variables('dataLakeAccountName')]"
},
"DATALAKE_STORAGE_ACCOUNT_KEY": {
"type": "string",
"value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).keys[0].value]"
},
"DATALAKE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": {
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]"
},
"DATALAKE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": {
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]"
},
"DATALAKE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": {
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]"
},
"DATALAKE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": {
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('dataLakeAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]"
},
"SOFT_DELETE_AZURE_STORAGE_ACCOUNT_NAME": {
"type": "string",
"value": "[variables('softDeleteAccountName')]"
},
"SOFT_DELETE_AZURE_STORAGE_ACCOUNT_KEY": {
"type": "string",
"value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).keys[0].value]"
},
"SOFT_DELETE_AZURE_STORAGE_ACCOUNT_BLOB_ENDPOINT_SUFFIX": {
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob)]"
},
"SOFT_DELETE_AZURE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": {
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]"
},
"SOFT_DELETE_AZURE_STORAGE_ACCOUNT_QUEUE_ENDPOINT_SUFFIX": {
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue)]"
},
"SOFT_DELETE_AZURE_STORAGE_ACCOUNT_TABLE_ENDPOINT_SUFFIX": {
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('softDeleteAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]"
},
"PREMIUM_FILE_STORAGE_ACCOUNT_NAME": {
"type": "string",
"value": "[variables('premiumFileAccountName')]"
},
"PREMIUM_FILE_STORAGE_ACCOUNT_KEY": {
"type": "string",
"value": "[listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('premiumFileAccountName')), variables('mgmtApiVersion')).keys[0].value]"
},
"PREMIUM_FILE_STORAGE_ACCOUNT_FILE_ENDPOINT_SUFFIX": {
"type": "string",
"value": "[url.serviceEndpointSuffix(reference(resourceId('Microsoft.Storage/storageAccounts', variables('premiumFileAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file)]"
},
"AZUREWEBJOBSSTORAGE": {
"type": "string",
"value": "[connectionString.create(variables('webjobsPrimaryAccountName'), listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).keys[0].value, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsPrimaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]"
},
"AZUREWEBJOBSSECONDARYSTORAGE": {
"type": "string",
"value": "[connectionString.create(variables('webjobsSecondaryAccountName'), listKeys(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).keys[0].value, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.blob, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.queue, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.file, reference(resourceId('Microsoft.Storage/storageAccounts', variables('webjobsSecondaryAccountName')), variables('mgmtApiVersion')).primaryEndpoints.table)]"
},
"RESOURCE_GROUP_NAME": {
"type": "string",
"value": "[variables('resourceGroupName')]"
},
"SUBSCRIPTION_ID": {
"type": "string",
"value": "[variables('subscriptionId')]"
},
"LOCATION": {
"type": "string",
"value": "[variables('location')]"
},
"AZURE_STORAGE_ENCRYPTION_SCOPE": {
"type": "string",
"value": "[variables('primaryEncryptionScopeName')]"
}
}
}

View file

@ -1,154 +0,0 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"context"
"io"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
// AppendBlobClient represents a client to an Azure Storage append blob.
type AppendBlobClient struct {
BlobClient
client *appendBlobClient // generated low-level client that performs the append-blob wire operations
}
// NewAppendBlobClient creates an AppendBlobClient with the specified URL, Azure AD credential, and options.
func NewAppendBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*AppendBlobClient, error) {
	connOpts := getConnectionOptions(options)
	// Authenticate every (re)try with a bearer token minted from the AAD credential.
	connOpts.PerRetryPolicies = append(connOpts.PerRetryPolicies, runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil))
	conn := newConnection(blobURL, connOpts)
	endpoint, pl := conn.Endpoint(), conn.Pipeline()
	return &AppendBlobClient{
		BlobClient: BlobClient{client: newBlobClient(endpoint, pl)},
		client:     newAppendBlobClient(endpoint, pl),
	}, nil
}
// NewAppendBlobClientWithNoCredential creates an AppendBlobClient with the specified URL and options.
func NewAppendBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*AppendBlobClient, error) {
	// No auth policy: the URL itself must carry authorization (e.g. a SAS token) or target a public blob.
	conn := newConnection(blobURL, getConnectionOptions(options))
	endpoint, pl := conn.Endpoint(), conn.Pipeline()
	return &AppendBlobClient{
		BlobClient: BlobClient{client: newBlobClient(endpoint, pl)},
		client:     newAppendBlobClient(endpoint, pl),
	}, nil
}
// NewAppendBlobClientWithSharedKey creates an AppendBlobClient with the specified URL, shared key, and options.
func NewAppendBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*AppendBlobClient, error) {
	connOpts := getConnectionOptions(options)
	// Sign every (re)try with the shared-key credential.
	connOpts.PerRetryPolicies = append(connOpts.PerRetryPolicies, newSharedKeyCredPolicy(cred))
	conn := newConnection(blobURL, connOpts)
	endpoint, pl := conn.Endpoint(), conn.Pipeline()
	return &AppendBlobClient{
		BlobClient: BlobClient{
			client:    newBlobClient(endpoint, pl),
			sharedKey: cred, // retained so GetSASToken can sign
		},
		client: newAppendBlobClient(endpoint, pl),
	}, nil
}
// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (ab *AppendBlobClient) WithSnapshot(snapshot string) (*AppendBlobClient, error) {
	parts, err := NewBlobURLParts(ab.URL())
	if err != nil {
		return nil, err
	}
	parts.Snapshot = snapshot
	// Rebuild both clients against the modified URL, reusing the existing pipeline.
	endpoint, pl := parts.URL(), ab.client.pl
	return &AppendBlobClient{
		BlobClient: BlobClient{
			client:    newBlobClient(endpoint, pl),
			sharedKey: ab.sharedKey,
		},
		client: newAppendBlobClient(endpoint, pl),
	}, nil
}
// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id.
// Pass "" to remove the versionID returning a URL to the base blob.
func (ab *AppendBlobClient) WithVersionID(versionID string) (*AppendBlobClient, error) {
	parts, err := NewBlobURLParts(ab.URL())
	if err != nil {
		return nil, err
	}
	parts.VersionID = versionID
	// Rebuild both clients against the modified URL, reusing the existing pipeline.
	endpoint, pl := parts.URL(), ab.client.pl
	return &AppendBlobClient{
		BlobClient: BlobClient{
			client:    newBlobClient(endpoint, pl),
			sharedKey: ab.sharedKey,
		},
		client: newAppendBlobClient(endpoint, pl),
	}, nil
}
// Create creates a 0-size append blob. Call AppendBlock to append data to an append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (ab *AppendBlobClient) Create(ctx context.Context, options *AppendBlobCreateOptions) (AppendBlobCreateResponse, error) {
	createOpts, httpHeaders, lease, cpk, cpkScope, modified := options.format()
	// Content length is always 0: append blobs are created empty and filled via AppendBlock.
	resp, err := ab.client.Create(ctx, 0, createOpts, httpHeaders, lease, cpk, cpkScope, modified)
	return toAppendBlobCreateResponse(resp), handleError(err)
}
// AppendBlock writes a stream to a new block of data to the end of the existing append blob.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block.
func (ab *AppendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeekCloser, options *AppendBlobAppendBlockOptions) (AppendBlobAppendBlockResponse, error) {
	count, err := validateSeekableStreamAt0AndGetCount(body)
	if err != nil {
		// Propagate the validation failure. The previous code returned a nil error
		// here, silently handing callers an empty response with no indication the
		// append never happened.
		return AppendBlobAppendBlockResponse{}, err
	}
	appendOptions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions := options.format()
	resp, err := ab.client.AppendBlock(ctx, count, body, appendOptions, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions)
	return toAppendBlobAppendBlockResponse(resp), handleError(err)
}
// AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url.
func (ab *AppendBlobClient) AppendBlockFromURL(ctx context.Context, source string, o *AppendBlobAppendBlockFromURLOptions) (AppendBlobAppendBlockFromURLResponse, error) {
	opts, cpk, cpkScope, lease, appendPos, modified, srcModified := o.format()
	// Content length must be 0 on all *FromURL operations; the service rejects anything else with a 400.
	resp, err := ab.client.AppendBlockFromURL(ctx, source, 0, opts, cpk, cpkScope, lease, appendPos, modified, srcModified)
	return toAppendBlobAppendBlockFromURLResponse(resp), handleError(err)
}
// SealAppendBlob - The purpose of Append Blob Seal is to allow users and applications to seal append blobs, marking them as read only.
// https://docs.microsoft.com/en-us/rest/api/storageservices/append-blob-seal
func (ab *AppendBlobClient) SealAppendBlob(ctx context.Context, options *AppendBlobSealOptions) (AppendBlobSealResponse, error) {
	lease, modified, appendPos := options.format()
	resp, err := ab.client.Seal(ctx, nil, lease, modified, appendPos)
	return toAppendBlobSealResponse(resp), handleError(err)
}

View file

@ -1,278 +0,0 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"context"
"errors"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
// BlobClient represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob.
type BlobClient struct {
client *blobClient // generated low-level client that performs the blob wire operations
sharedKey *SharedKeyCredential // nil unless constructed with a shared key; needed by GetSASToken
}
// NewBlobClient creates a BlobClient object using the specified URL, Azure AD credential, and options.
func NewBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*BlobClient, error) {
	connOpts := getConnectionOptions(options)
	// Authenticate every (re)try with a bearer token minted from the AAD credential.
	connOpts.PerRetryPolicies = append(connOpts.PerRetryPolicies, runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil))
	conn := newConnection(blobURL, connOpts)
	return &BlobClient{client: newBlobClient(conn.Endpoint(), conn.Pipeline())}, nil
}
// NewBlobClientWithNoCredential creates a BlobClient object using the specified URL and options.
func NewBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*BlobClient, error) {
	// No auth policy: the URL itself must carry authorization (e.g. a SAS token) or target a public blob.
	conn := newConnection(blobURL, getConnectionOptions(options))
	return &BlobClient{client: newBlobClient(conn.Endpoint(), conn.Pipeline())}, nil
}
// NewBlobClientWithSharedKey creates a BlobClient object using the specified URL, shared key, and options.
func NewBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*BlobClient, error) {
	authPolicy := newSharedKeyCredPolicy(cred)
	conOptions := getConnectionOptions(options)
	conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy)
	conn := newConnection(blobURL, conOptions)
	return &BlobClient{
		// Use conn.Endpoint() as every sibling constructor does (previously this
		// passed the raw blobURL, bypassing any normalization newConnection applies).
		client:    newBlobClient(conn.Endpoint(), conn.Pipeline()),
		sharedKey: cred,
	}, nil
}
// NewBlobClientFromConnectionString creates BlobClient from a connection String
//nolint
func NewBlobClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*BlobClient, error) {
	// Build a container-scoped client first, then derive the blob client from it.
	containerClient, err := NewContainerClientFromConnectionString(connectionString, containerName, options)
	if err != nil {
		return nil, err
	}
	return containerClient.NewBlobClient(blobName)
}
// URL returns the URL endpoint used by the BlobClient object.
func (b *BlobClient) URL() string {
return b.client.endpoint // endpoint captured when the client was constructed
}
// WithSnapshot creates a new BlobClient object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (b *BlobClient) WithSnapshot(snapshot string) (*BlobClient, error) {
	parts, err := NewBlobURLParts(b.URL())
	if err != nil {
		return nil, err
	}
	parts.Snapshot = snapshot
	// Reuse the existing pipeline against the rebuilt URL.
	return &BlobClient{
		client:    newBlobClient(parts.URL(), b.client.pl),
		sharedKey: b.sharedKey,
	}, nil
}
// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id.
// Pass "" to remove the versionID returning a URL to the base blob.
func (b *BlobClient) WithVersionID(versionID string) (*BlobClient, error) {
	parts, err := NewBlobURLParts(b.URL())
	if err != nil {
		return nil, err
	}
	parts.VersionID = versionID
	// Reuse the existing pipeline against the rebuilt URL.
	return &BlobClient{
		client:    newBlobClient(parts.URL(), b.client.pl),
		sharedKey: b.sharedKey,
	}, nil
}
// Download reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob.
func (b *BlobClient) Download(ctx context.Context, options *BlobDownloadOptions) (BlobDownloadResponse, error) {
o, lease, cpk, accessConditions := options.format()
dr, err := b.client.Download(ctx, o, lease, cpk, accessConditions)
if err != nil {
return BlobDownloadResponse{}, handleError(err)
}
// Record the requested range (defaulting to offset 0 / whole blob) and the
// response ETag in getInfo — presumably so later body reads can be retried
// against the same blob version (TODO confirm against HTTPGetterInfo's consumer).
offset := int64(0)
count := int64(CountToEnd)
if options != nil && options.Offset != nil {
offset = *options.Offset
}
if options != nil && options.Count != nil {
count = *options.Count
}
eTag := ""
if dr.ETag != nil {
eTag = *dr.ETag
}
return BlobDownloadResponse{
b: b,
blobClientDownloadResponse: dr,
ctx: ctx,
getInfo: HTTPGetterInfo{Offset: offset, Count: count, ETag: eTag},
ObjectReplicationRules: deserializeORSPolicies(dr.ObjectReplicationRules),
}, err // err is necessarily nil here; kept for symmetry with the error path above
}
// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection.
// Note that deleting a blob also deletes all its snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob.
func (b *BlobClient) Delete(ctx context.Context, o *BlobDeleteOptions) (BlobDeleteResponse, error) {
	opts, lease, access := o.format()
	resp, err := b.client.Delete(ctx, opts, lease, access)
	return toBlobDeleteResponse(resp), handleError(err)
}
// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob.
func (b *BlobClient) Undelete(ctx context.Context, o *BlobUndeleteOptions) (BlobUndeleteResponse, error) {
	resp, err := b.client.Undelete(ctx, o.format())
	return toBlobUndeleteResponse(resp), handleError(err)
}
// SetTier operation sets the tier on a blob. The operation is allowed on a page
// blob in a premium storage account and on a block blob in a blob storage account (locally
// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and
// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation
// does not update the blob's ETag.
// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers.
func (b *BlobClient) SetTier(ctx context.Context, tier AccessTier, options *BlobSetTierOptions) (BlobSetTierResponse, error) {
	opts, lease, access := options.format()
	resp, err := b.client.SetTier(ctx, tier, opts, lease, access)
	return toBlobSetTierResponse(resp), handleError(err)
}
// GetProperties returns the blob's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties.
func (b *BlobClient) GetProperties(ctx context.Context, options *BlobGetPropertiesOptions) (BlobGetPropertiesResponse, error) {
	opts, lease, cpk, access := options.format()
	resp, err := b.client.GetProperties(ctx, opts, lease, cpk, access)
	return toGetBlobPropertiesResponse(resp), handleError(err)
}
// SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (b *BlobClient) SetHTTPHeaders(ctx context.Context, blobHttpHeaders BlobHTTPHeaders, options *BlobSetHTTPHeadersOptions) (BlobSetHTTPHeadersResponse, error) {
	opts, lease, access := options.format()
	resp, err := b.client.SetHTTPHeaders(ctx, opts, &blobHttpHeaders, lease, access)
	return toBlobSetHTTPHeadersResponse(resp), handleError(err)
}
// SetMetadata changes a blob's metadata.
// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata.
func (b *BlobClient) SetMetadata(ctx context.Context, metadata map[string]string, options *BlobSetMetadataOptions) (BlobSetMetadataResponse, error) {
	lease, cpk, cpkScope, access := options.format()
	// The metadata itself travels in the generated options struct.
	opts := blobClientSetMetadataOptions{Metadata: metadata}
	resp, err := b.client.SetMetadata(ctx, &opts, lease, cpk, cpkScope, access)
	return toBlobSetMetadataResponse(resp), handleError(err)
}
// CreateSnapshot creates a read-only snapshot of a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob.
func (b *BlobClient) CreateSnapshot(ctx context.Context, options *BlobCreateSnapshotOptions) (BlobCreateSnapshotResponse, error) {
	// Deliberately no check for a snapshot query parameter already present in the URL:
	// that would tax every call for a very unusual misuse, so the common case wins.
	opts, cpk, cpkScope, access, lease := options.format()
	resp, err := b.client.CreateSnapshot(ctx, opts, cpk, cpkScope, access, lease)
	return toBlobCreateSnapshotResponse(resp), handleError(err)
}
// StartCopyFromURL copies the data at the source URL to a blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob.
func (b *BlobClient) StartCopyFromURL(ctx context.Context, copySource string, options *BlobStartCopyOptions) (BlobStartCopyFromURLResponse, error) {
	opts, srcAccess, destAccess, lease := options.format()
	resp, err := b.client.StartCopyFromURL(ctx, copySource, opts, srcAccess, destAccess, lease)
	return toBlobStartCopyFromURLResponse(resp), handleError(err)
}
// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob.
func (b *BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, options *BlobAbortCopyOptions) (BlobAbortCopyFromURLResponse, error) {
	opts, lease := options.format()
	resp, err := b.client.AbortCopyFromURL(ctx, copyID, opts, lease)
	return toBlobAbortCopyFromURLResponse(resp), handleError(err)
}
// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot.
// Each call to this operation replaces all existing tags attached to the blob.
// To remove all tags from the blob, call this operation with no tags set.
// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags
func (b *BlobClient) SetTags(ctx context.Context, options *BlobSetTagsOptions) (BlobSetTagsResponse, error) {
	opts, modified, lease := options.format()
	resp, err := b.client.SetTags(ctx, opts, modified, lease)
	return toBlobSetTagsResponse(resp), handleError(err)
}
// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot.
// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags
func (b *BlobClient) GetTags(ctx context.Context, options *BlobGetTagsOptions) (BlobGetTagsResponse, error) {
	opts, modified, lease := options.format()
	resp, err := b.client.GetTags(ctx, opts, modified, lease)
	return toBlobGetTagsResponse(resp), handleError(err)
}
// GetSASToken is a convenience method for generating a SAS token for the currently pointed at blob.
// It can only be used if the credential supplied during creation was a SharedKeyCredential.
func (b *BlobClient) GetSASToken(permissions BlobSASPermissions, start time.Time, expiry time.Time) (SASQueryParameters, error) {
	// Fail fast before doing any URL work: signing is impossible without a shared key.
	if b.sharedKey == nil {
		return SASQueryParameters{}, errors.New("credential is not a SharedKeyCredential. SAS can only be signed with a SharedKeyCredential")
	}
	// b.URL() was produced by this client, so a parse failure is not expected;
	// on failure urlParts is the zero value and the SAS is built from empty parts.
	urlParts, _ := NewBlobURLParts(b.URL())
	// An absent or unparsable snapshot component maps to the zero time.Time —
	// presumably treated as "no snapshot" by NewSASQueryParameters (confirm).
	t, err := time.Parse(SnapshotTimeFormat, urlParts.Snapshot)
	if err != nil {
		t = time.Time{}
	}
	return BlobSASSignatureValues{
		ContainerName: urlParts.ContainerName,
		BlobName:      urlParts.BlobName,
		SnapshotTime:  t,
		Version:       SASVersion,
		Permissions:   permissions.String(),
		StartTime:     start.UTC(),
		ExpiryTime:    expiry.UTC(),
	}.NewSASQueryParameters(b.sharedKey)
}

View file

@ -1,98 +0,0 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"context"
"errors"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
)
// BlobLeaseClient represents lease client on blob
type BlobLeaseClient struct {
BlobClient
leaseID *string // lease this client operates on; set at construction and updated by ChangeLease
}
// NewBlobLeaseClient is constructor for BlobLeaseClient
func (b *BlobClient) NewBlobLeaseClient(leaseID *string) (*BlobLeaseClient, error) {
	// Generate a fresh lease ID when the caller does not supply one.
	if leaseID == nil {
		id, err := uuid.New()
		if err != nil {
			return nil, err
		}
		leaseID = to.Ptr(id.String())
	}
	return &BlobLeaseClient{BlobClient: *b, leaseID: leaseID}, nil
}
// AcquireLease acquires a lease on the blob for write and delete operations.
// The lease Duration must be between 15 and 60 seconds, or infinite (-1).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (blc *BlobLeaseClient) AcquireLease(ctx context.Context, options *BlobAcquireLeaseOptions) (BlobAcquireLeaseResponse, error) {
	opts, modified := options.format()
	// Propose this client's lease ID so subsequent lease operations use the same lease.
	opts.ProposedLeaseID = blc.leaseID
	resp, err := blc.client.AcquireLease(ctx, &opts, modified)
	return toBlobAcquireLeaseResponse(resp), handleError(err)
}
// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1)
// constant to break a fixed-Duration lease when it expires or an infinite lease immediately.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (blc *BlobLeaseClient) BreakLease(ctx context.Context, options *BlobBreakLeaseOptions) (BlobBreakLeaseResponse, error) {
	opts, modified := options.format()
	resp, err := blc.client.BreakLease(ctx, opts, modified)
	return toBlobBreakLeaseResponse(resp), handleError(err)
}
// ChangeLease changes the blob's lease ID.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (blc *BlobLeaseClient) ChangeLease(ctx context.Context, options *BlobChangeLeaseOptions) (BlobChangeLeaseResponse, error) {
// The current lease ID is required to authorize the change.
if blc.leaseID == nil {
return BlobChangeLeaseResponse{}, errors.New("leaseID cannot be nil")
}
proposedLeaseID, changeLeaseOptions, modifiedAccessConditions, err := options.format()
if err != nil {
return BlobChangeLeaseResponse{}, err
}
resp, err := blc.client.ChangeLease(ctx, *blc.leaseID, *proposedLeaseID, changeLeaseOptions, modifiedAccessConditions)
// If lease has been changed successfully, set the leaseID in client
// so that later operations on this client use the new lease.
if err == nil {
blc.leaseID = proposedLeaseID
}
return toBlobChangeLeaseResponse(resp), handleError(err)
}
// RenewLease renews the blob's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (blc *BlobLeaseClient) RenewLease(ctx context.Context, options *BlobRenewLeaseOptions) (BlobRenewLeaseResponse, error) {
	// The current lease ID is required to renew.
	if blc.leaseID == nil {
		return BlobRenewLeaseResponse{}, errors.New("leaseID cannot be nil")
	}
	opts, modified := options.format()
	resp, err := blc.client.RenewLease(ctx, *blc.leaseID, opts, modified)
	return toBlobRenewLeaseResponse(resp), handleError(err)
}
// ReleaseLease releases the blob's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob.
func (blc *BlobLeaseClient) ReleaseLease(ctx context.Context, options *ReleaseLeaseBlobOptions) (BlobReleaseLeaseResponse, error) {
	// The current lease ID is required to release.
	if blc.leaseID == nil {
		return BlobReleaseLeaseResponse{}, errors.New("leaseID cannot be nil")
	}
	opts, modified := options.format()
	resp, err := blc.client.ReleaseLease(ctx, *blc.leaseID, opts, modified)
	return toBlobReleaseLeaseResponse(resp), handleError(err)
}

View file

@ -1,201 +0,0 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"context"
"io"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)
// BlockBlobClient defines a set of operations applicable to block blobs.
type BlockBlobClient struct {
	// BlobClient provides the operations shared by every blob type.
	BlobClient
	client *blockBlobClient // generated client used for block-blob-specific wire calls
}
// NewBlockBlobClient creates a BlockBlobClient object using the specified URL, Azure AD credential, and options.
func NewBlockBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*BlockBlobClient, error) {
	opts := getConnectionOptions(options)
	// Authenticate every (re)try of a request with a bearer token.
	opts.PerRetryPolicies = append(opts.PerRetryPolicies, runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil))
	conn := newConnection(blobURL, opts)
	blob := newBlobClient(conn.Endpoint(), conn.Pipeline())
	return &BlockBlobClient{
		client:     newBlockBlobClient(blob.endpoint, blob.pl),
		BlobClient: BlobClient{client: blob},
	}, nil
}
// NewBlockBlobClientWithNoCredential creates a BlockBlobClient object using the specified URL and options.
func NewBlockBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*BlockBlobClient, error) {
	// No auth policy is installed; the URL itself (e.g. a SAS) must authorize access.
	conn := newConnection(blobURL, getConnectionOptions(options))
	blob := newBlobClient(conn.Endpoint(), conn.Pipeline())
	return &BlockBlobClient{
		client:     newBlockBlobClient(blob.endpoint, blob.pl),
		BlobClient: BlobClient{client: blob},
	}, nil
}
// NewBlockBlobClientWithSharedKey creates a BlockBlobClient object using the specified URL, shared key, and options.
func NewBlockBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*BlockBlobClient, error) {
	opts := getConnectionOptions(options)
	// Sign every (re)try of a request with the shared key.
	opts.PerRetryPolicies = append(opts.PerRetryPolicies, newSharedKeyCredPolicy(cred))
	conn := newConnection(blobURL, opts)
	blob := newBlobClient(conn.Endpoint(), conn.Pipeline())
	return &BlockBlobClient{
		client: newBlockBlobClient(blob.endpoint, blob.pl),
		BlobClient: BlobClient{
			client:    blob,
			sharedKey: cred, // retained so SAS URLs can be generated later
		},
	}, nil
}
// WithSnapshot creates a new BlockBlobClient object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (bb *BlockBlobClient) WithSnapshot(snapshot string) (*BlockBlobClient, error) {
	parts, err := NewBlobURLParts(bb.URL())
	if err != nil {
		return nil, err
	}
	parts.Snapshot = snapshot
	// Rebuild the clients on the modified URL, reusing the existing pipeline.
	blob := newBlobClient(parts.URL(), bb.client.pl)
	return &BlockBlobClient{
		client: newBlockBlobClient(blob.endpoint, blob.pl),
		BlobClient: BlobClient{
			client:    blob,
			sharedKey: bb.sharedKey,
		},
	}, nil
}
// WithVersionID creates a new BlockBlobClient object identical to the source but with the specified version id.
// Pass "" to remove the versionID returning a URL to the base blob.
func (bb *BlockBlobClient) WithVersionID(versionID string) (*BlockBlobClient, error) {
	parts, err := NewBlobURLParts(bb.URL())
	if err != nil {
		return nil, err
	}
	parts.VersionID = versionID
	// Rebuild the clients on the modified URL, reusing the existing pipeline.
	blob := newBlobClient(parts.URL(), bb.client.pl)
	return &BlockBlobClient{
		client: newBlockBlobClient(blob.endpoint, blob.pl),
		BlobClient: BlobClient{
			client:    blob,
			sharedKey: bb.sharedKey,
		},
	}, nil
}
// Upload creates a new block blob or overwrites an existing block blob.
// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not
// supported with Upload; the content of the existing blob is overwritten with the new content. To
// perform a partial update of a block blob, use StageBlock and CommitBlockList.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (bb *BlockBlobClient) Upload(ctx context.Context, body io.ReadSeekCloser, options *BlockBlobUploadOptions) (BlockBlobUploadResponse, error) {
	// The service requires the exact byte count up front.
	count, err := validateSeekableStreamAt0AndGetCount(body)
	if err != nil {
		return BlockBlobUploadResponse{}, err
	}
	basics, headers, lease, cpkV, cpkN, access := options.format()
	resp, err := bb.client.Upload(ctx, count, body, basics, headers, lease, cpkV, cpkN, access)
	return toBlockBlobUploadResponse(resp), handleError(err)
}
// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
func (bb *BlockBlobClient) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeekCloser,
	options *BlockBlobStageBlockOptions) (BlockBlobStageBlockResponse, error) {
	// The service requires the exact byte count up front.
	count, err := validateSeekableStreamAt0AndGetCount(body)
	if err != nil {
		return BlockBlobStageBlockResponse{}, err
	}
	opts, leaseAccess, cpkInfo, cpkScope := options.format()
	resp, err := bb.client.StageBlock(ctx, base64BlockID, count, body, opts, leaseAccess, cpkInfo, cpkScope)
	return toBlockBlobStageBlockResponse(resp), handleError(err)
}
// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
// If count is CountToEnd (0), then data is read from specified offset to the end.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
func (bb *BlockBlobClient) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL string,
	contentLength int64, options *BlockBlobStageBlockFromURLOptions) (BlockBlobStageBlockFromURLResponse, error) {
	opts, cpkInfo, cpkScope, leaseAccess, srcModAccess := options.format()
	resp, err := bb.client.StageBlockFromURL(ctx, base64BlockID, contentLength, sourceURL, opts,
		cpkInfo, cpkScope, leaseAccess, srcModAccess)
	return toBlockBlobStageBlockFromURLResponse(resp), handleError(err)
}
// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
// In order to be written as part of a blob, a block must have been successfully written
// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob
// by uploading only those blocks that have changed, then committing the new and existing
// blocks together. Any blocks not specified in the block list are permanently deleted.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
func (bb *BlockBlobClient) CommitBlockList(ctx context.Context, base64BlockIDs []string, options *BlockBlobCommitBlockListOptions) (BlockBlobCommitBlockListResponse, error) {
	// The generated client expects []*string; convert the caller's []string.
	ids := make([]*string, len(base64BlockIDs))
	for i := range base64BlockIDs {
		ids[i] = to.Ptr(base64BlockIDs[i])
	}
	opts, headers, leaseAccess, cpkInfo, cpkScope, modAccess := options.format()
	resp, err := bb.client.CommitBlockList(ctx, BlockLookupList{Latest: ids}, opts, headers, leaseAccess, cpkInfo, cpkScope, modAccess)
	return toBlockBlobCommitBlockListResponse(resp), handleError(err)
}
// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list.
func (bb *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, options *BlockBlobGetBlockListOptions) (BlockBlobGetBlockListResponse, error) {
	opts, leaseAccess, modAccess := options.format()
	resp, err := bb.client.GetBlockList(ctx, listType, opts, leaseAccess, modAccess)
	return toBlockBlobGetBlockListResponse(resp), handleError(err)
}
// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB.
// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url.
func (bb *BlockBlobClient) CopyFromURL(ctx context.Context, source string, options *BlockBlobCopyFromURLOptions) (BlockBlobCopyFromURLResponse, error) {
	opts, srcModAccess, modAccess, leaseAccess := options.format()
	resp, err := bb.BlobClient.client.CopyFromURL(ctx, source, opts, srcModAccess, modAccess, leaseAccess)
	return toBlockBlobCopyFromURLResponse(resp), handleError(err)
}

View file

@ -1,88 +0,0 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"errors"
"fmt"
"strings"
)
// errConnectionString is returned whenever a connection string is blank or cannot be parsed.
var errConnectionString = errors.New("connection string is either blank or malformed. The expected connection string " +
	"should contain key value pairs separated by semicolons. For example 'DefaultEndpointsProtocol=https;AccountName=<accountName>;" +
	"AccountKey=<accountKey>;EndpointSuffix=core.windows.net'")
// convertConnStrToMap converts a connection string (in format key1=value1;key2=value2;key3=value3;) into a map of key-value pairs
func convertConnStrToMap(connStr string) (map[string]string, error) {
	result := make(map[string]string)
	// A trailing semicolon is legal; drop it before splitting into pairs.
	pairs := strings.Split(strings.TrimRight(connStr, ";"), ";")
	if len(pairs) == 0 {
		return result, errConnectionString
	}
	for _, pair := range pairs {
		// Split on the first '=' only: values (e.g. base64 account keys) may contain '='.
		key, value, found := strings.Cut(pair, "=")
		if !found {
			return result, errConnectionString
		}
		result[key] = value
	}
	return result, nil
}
// parseConnectionString parses a connection string into a service URL and a SharedKeyCredential or a service url with the
// SharedAccessSignature combined.
func parseConnectionString(connectionString string) (string, *SharedKeyCredential, error) {
	const (
		defaultScheme = "https"
		defaultSuffix = "core.windows.net"
	)
	connStrMap, err := convertConnStrToMap(connectionString)
	if err != nil {
		return "", nil, err
	}
	accountName, ok := connStrMap["AccountName"]
	if !ok {
		return "", nil, errConnectionString
	}
	accountKey, hasKey := connStrMap["AccountKey"]
	if !hasKey {
		// No account key: the string must instead embed a SAS token.
		sas, hasSAS := connStrMap["SharedAccessSignature"]
		if !hasSAS {
			return "", nil, errConnectionString
		}
		return fmt.Sprintf("%v://%v.blob.%v/?%v", defaultScheme, accountName, defaultSuffix, sas), nil, nil
	}
	protocol, ok := connStrMap["DefaultEndpointsProtocol"]
	if !ok {
		protocol = defaultScheme
	}
	suffix, ok := connStrMap["EndpointSuffix"]
	if !ok {
		suffix = defaultSuffix
	}
	// An explicit BlobEndpoint overrides the URL assembled from protocol/account/suffix.
	if blobEndpoint, ok := connStrMap["BlobEndpoint"]; ok {
		cred, err := NewSharedKeyCredential(accountName, accountKey)
		return blobEndpoint, cred, err
	}
	cred, err := NewSharedKeyCredential(accountName, accountKey)
	if err != nil {
		return "", nil, err
	}
	return fmt.Sprintf("%v://%v.blob.%v", protocol, accountName, suffix), cred, nil
}

View file

@ -1,253 +0,0 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"context"
"errors"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
// ContainerClient represents a URL to the Azure Storage container allowing you to manipulate its blobs.
type ContainerClient struct {
	client    *containerClient     // generated client that performs the wire operations
	sharedKey *SharedKeyCredential // set only by the shared-key constructor; required by GetSASURL
}
// URL returns the URL endpoint used by the ContainerClient object.
func (c *ContainerClient) URL() string {
	// The endpoint is the one recorded on the generated client at construction time.
	return c.client.endpoint
}
// NewContainerClient creates a ContainerClient object using the specified URL, Azure AD credential, and options.
func NewContainerClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*ContainerClient, error) {
	opts := getConnectionOptions(options)
	// Authenticate every (re)try of a request with a bearer token.
	opts.PerRetryPolicies = append(opts.PerRetryPolicies, runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil))
	conn := newConnection(containerURL, opts)
	return &ContainerClient{client: newContainerClient(conn.Endpoint(), conn.Pipeline())}, nil
}
// NewContainerClientWithNoCredential creates a ContainerClient object using the specified URL and options.
func NewContainerClientWithNoCredential(containerURL string, options *ClientOptions) (*ContainerClient, error) {
	// No auth policy is installed; the URL itself (e.g. a SAS) must authorize access.
	conn := newConnection(containerURL, getConnectionOptions(options))
	return &ContainerClient{client: newContainerClient(conn.Endpoint(), conn.Pipeline())}, nil
}
// NewContainerClientWithSharedKey creates a ContainerClient object using the specified URL, shared key, and options.
func NewContainerClientWithSharedKey(containerURL string, cred *SharedKeyCredential, options *ClientOptions) (*ContainerClient, error) {
	opts := getConnectionOptions(options)
	// Sign every (re)try of a request with the shared key.
	opts.PerRetryPolicies = append(opts.PerRetryPolicies, newSharedKeyCredPolicy(cred))
	conn := newConnection(containerURL, opts)
	return &ContainerClient{
		client:    newContainerClient(conn.Endpoint(), conn.Pipeline()),
		sharedKey: cred, // retained so SAS URLs can be generated later
	}, nil
}
// NewContainerClientFromConnectionString creates a ContainerClient object using connection string of an account
func NewContainerClientFromConnectionString(connectionString string, containerName string, options *ClientOptions) (*ContainerClient, error) {
	// Build a service-level client first, then scope it down to the container.
	svc, err := NewServiceClientFromConnectionString(connectionString, options)
	if err != nil {
		return nil, err
	}
	return svc.NewContainerClient(containerName)
}
// NewBlobClient creates a new BlobClient object by concatenating blobName to the end of
// ContainerClient's URL. The new BlobClient uses the same request policy pipeline as the ContainerClient.
// To change the pipeline, create the BlobClient and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewBlobClient instead of calling this object's
// NewBlobClient method.
func (c *ContainerClient) NewBlobClient(blobName string) (*BlobClient, error) {
	endpoint := appendToURLPath(c.URL(), blobName)
	return &BlobClient{
		client:    newBlobClient(endpoint, c.client.pl),
		sharedKey: c.sharedKey,
	}, nil
}
// NewAppendBlobClient creates a new AppendBlobURL object by concatenating blobName to the end of
// ContainerClient's URL. The new AppendBlobURL uses the same request policy pipeline as the ContainerClient.
// To change the pipeline, create the AppendBlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewAppendBlobClient instead of calling this object's
// NewAppendBlobClient method.
func (c *ContainerClient) NewAppendBlobClient(blobName string) (*AppendBlobClient, error) {
	endpoint := appendToURLPath(c.URL(), blobName)
	return &AppendBlobClient{
		client: newAppendBlobClient(endpoint, c.client.pl),
		BlobClient: BlobClient{
			client:    newBlobClient(endpoint, c.client.pl),
			sharedKey: c.sharedKey,
		},
	}, nil
}
// NewBlockBlobClient creates a new BlockBlobClient object by concatenating blobName to the end of
// ContainerClient's URL. The new BlockBlobClient uses the same request policy pipeline as the ContainerClient.
// To change the pipeline, create the BlockBlobClient and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewBlockBlobClient instead of calling this object's
// NewBlockBlobClient method.
func (c *ContainerClient) NewBlockBlobClient(blobName string) (*BlockBlobClient, error) {
	endpoint := appendToURLPath(c.URL(), blobName)
	return &BlockBlobClient{
		client: newBlockBlobClient(endpoint, c.client.pl),
		BlobClient: BlobClient{
			client:    newBlobClient(endpoint, c.client.pl),
			sharedKey: c.sharedKey,
		},
	}, nil
}
// NewPageBlobClient creates a new PageBlobURL object by concatenating blobName to the end of ContainerClient's URL. The new PageBlobURL uses the same request policy pipeline as the ContainerClient.
// To change the pipeline, create the PageBlobURL and then call its WithPipeline method passing in the
// desired pipeline object. Or, call this package's NewPageBlobClient instead of calling this object's
// NewPageBlobClient method.
func (c *ContainerClient) NewPageBlobClient(blobName string) (*PageBlobClient, error) {
	endpoint := appendToURLPath(c.URL(), blobName)
	return &PageBlobClient{
		client: newPageBlobClient(endpoint, c.client.pl),
		BlobClient: BlobClient{
			client:    newBlobClient(endpoint, c.client.pl),
			sharedKey: c.sharedKey,
		},
	}, nil
}
// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container.
func (c *ContainerClient) Create(ctx context.Context, options *ContainerCreateOptions) (ContainerCreateResponse, error) {
	opts, cpkInfo := options.format()
	resp, err := c.client.Create(ctx, opts, cpkInfo)
	return toContainerCreateResponse(resp), handleError(err)
}
// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container.
func (c *ContainerClient) Delete(ctx context.Context, o *ContainerDeleteOptions) (ContainerDeleteResponse, error) {
	opts, lease, access := o.format()
	resp, err := c.client.Delete(ctx, opts, lease, access)
	return toContainerDeleteResponse(resp), handleError(err)
}
// GetProperties returns the container's properties.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata.
func (c *ContainerClient) GetProperties(ctx context.Context, o *ContainerGetPropertiesOptions) (ContainerGetPropertiesResponse, error) {
	// NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties.
	// This allows us to not expose a GetProperties method at all simplifying the API.
	// The optionals are nil, like they were in track 1.5
	opts, leaseAccess := o.format()
	resp, err := c.client.GetProperties(ctx, opts, leaseAccess)
	return toContainerGetPropertiesResponse(resp), handleError(err)
}
// SetMetadata sets the container's metadata.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata.
func (c *ContainerClient) SetMetadata(ctx context.Context, o *ContainerSetMetadataOptions) (ContainerSetMetadataResponse, error) {
	opts, leaseAccess, modAccess := o.format()
	resp, err := c.client.SetMetadata(ctx, opts, leaseAccess, modAccess)
	return toContainerSetMetadataResponse(resp), handleError(err)
}
// GetAccessPolicy returns the container's access policy. The access policy indicates whether container's blobs may be accessed publicly.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl.
func (c *ContainerClient) GetAccessPolicy(ctx context.Context, o *ContainerGetAccessPolicyOptions) (ContainerGetAccessPolicyResponse, error) {
	opts, leaseAccess := o.format()
	resp, err := c.client.GetAccessPolicy(ctx, opts, leaseAccess)
	return toContainerGetAccessPolicyResponse(resp), handleError(err)
}
// SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl.
func (c *ContainerClient) SetAccessPolicy(ctx context.Context, o *ContainerSetAccessPolicyOptions) (ContainerSetAccessPolicyResponse, error) {
	policyOpts, modAccess, leaseAccess := o.format()
	resp, err := c.client.SetAccessPolicy(ctx, policyOpts, modAccess, leaseAccess)
	return toContainerSetAccessPolicyResponse(resp), handleError(err)
}
// ListBlobsFlat returns a pager for blobs starting from the specified Marker. Use an empty
// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
func (c *ContainerClient) ListBlobsFlat(o *ContainerListBlobsFlatOptions) *ContainerListBlobFlatPager {
	opts := o.format()
	pager := c.client.ListBlobFlatSegment(opts)
	// Replace the advancer so each subsequent request carries the marker from the previous page.
	pager.advancer = func(ctx context.Context, response containerClientListBlobFlatSegmentResponse) (*policy.Request, error) {
		opts.Marker = response.NextMarker
		return c.client.listBlobFlatSegmentCreateRequest(ctx, opts)
	}
	return toContainerListBlobFlatSegmentPager(pager)
}
// ListBlobsHierarchy returns a channel of blobs starting from the specified Marker. Use an empty
// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
// After getting a segment, process it, and then call ListBlobsHierarchicalSegment again (passing the
// previously-returned Marker) to get the next segment.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
// AutoPagerTimeout specifies the amount of time with no read operations before the channel times out and closes. Specify no time and it will be ignored.
// AutoPagerBufferSize specifies the channel's buffer size.
// Both the blob item channel and error channel should be watched. Only one error will be released via this channel (or a nil error, to register a clean exit.)
func (c *ContainerClient) ListBlobsHierarchy(delimiter string, o *ContainerListBlobsHierarchyOptions) *ContainerListBlobHierarchyPager {
	opts := o.format()
	pager := c.client.ListBlobHierarchySegment(delimiter, opts)
	// Replace the advancer so each subsequent request carries the marker from the previous page.
	pager.advancer = func(ctx context.Context, response containerClientListBlobHierarchySegmentResponse) (*policy.Request, error) {
		opts.Marker = response.NextMarker
		return c.client.listBlobHierarchySegmentCreateRequest(ctx, delimiter, opts)
	}
	return toContainerListBlobHierarchySegmentPager(pager)
}
// GetSASURL is a convenience method for generating a SAS token for the currently pointed at container.
// It can only be used if the credential supplied during creation was a SharedKeyCredential.
func (c *ContainerClient) GetSASURL(permissions ContainerSASPermissions, start time.Time, expiry time.Time) (string, error) {
	if c.sharedKey == nil {
		return "", errors.New("SAS can only be signed with a SharedKeyCredential")
	}
	parts, err := NewBlobURLParts(c.URL())
	if err != nil {
		return "", err
	}
	// Containers do not have snapshots, nor versions.
	parts.SAS, err = BlobSASSignatureValues{
		ContainerName: parts.ContainerName,
		Permissions:   permissions.String(),
		StartTime:     start.UTC(),
		ExpiryTime:    expiry.UTC(),
	}.NewSASQueryParameters(c.sharedKey)
	return parts.URL(), err
}

View file

@ -1,102 +0,0 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"context"
"errors"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
)
// ContainerLeaseClient represents lease client of container
type ContainerLeaseClient struct {
	// ContainerClient provides the container operations the lease applies to.
	ContainerClient
	leaseID *string // lease ID tracked across acquire/renew/change calls
}
// NewContainerLeaseClient is constructor of ContainerLeaseClient
func (c *ContainerClient) NewContainerLeaseClient(leaseID *string) (*ContainerLeaseClient, error) {
	if leaseID == nil {
		// No lease ID supplied: generate a fresh UUID to propose.
		id, err := uuid.New()
		if err != nil {
			return nil, err
		}
		leaseID = to.Ptr(id.String())
	}
	return &ContainerLeaseClient{
		ContainerClient: *c,
		leaseID:         leaseID,
	}, nil
}
// AcquireLease acquires a lease on the container for delete operations. The lease Duration must be between 15 to 60 seconds, or infinite (-1).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (clc *ContainerLeaseClient) AcquireLease(ctx context.Context, options *ContainerAcquireLeaseOptions) (ContainerAcquireLeaseResponse, error) {
	opts, modAccess := options.format()
	opts.ProposedLeaseID = clc.leaseID
	resp, err := clc.client.AcquireLease(ctx, &opts, modAccess)
	if err == nil && resp.LeaseID != nil {
		// Track the lease ID the service actually granted.
		clc.leaseID = resp.LeaseID
	}
	return toContainerAcquireLeaseResponse(resp), handleError(err)
}
// BreakLease breaks the container's previously-acquired lease (if it exists).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (clc *ContainerLeaseClient) BreakLease(ctx context.Context, options *ContainerBreakLeaseOptions) (ContainerBreakLeaseResponse, error) {
	opts, modAccess := options.format()
	resp, err := clc.client.BreakLease(ctx, opts, modAccess)
	return toContainerBreakLeaseResponse(resp), handleError(err)
}
// ChangeLease changes the container's lease ID.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (clc *ContainerLeaseClient) ChangeLease(ctx context.Context, options *ContainerChangeLeaseOptions) (ContainerChangeLeaseResponse, error) {
	// A lease can only be changed when the client tracks an acquired lease ID.
	if clc.leaseID == nil {
		return ContainerChangeLeaseResponse{}, errors.New("leaseID cannot be nil")
	}
	proposedID, opts, modAccess, err := options.format()
	if err != nil {
		return ContainerChangeLeaseResponse{}, err
	}
	resp, err := clc.client.ChangeLease(ctx, *clc.leaseID, *proposedID, opts, modAccess)
	if err == nil && resp.LeaseID != nil {
		// Track the lease ID the service reports back.
		clc.leaseID = resp.LeaseID
	}
	return toContainerChangeLeaseResponse(resp), handleError(err)
}
// ReleaseLease releases the container's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (clc *ContainerLeaseClient) ReleaseLease(ctx context.Context, options *ContainerReleaseLeaseOptions) (ContainerReleaseLeaseResponse, error) {
	// Releasing requires the lease ID obtained at acquisition time.
	if clc.leaseID == nil {
		return ContainerReleaseLeaseResponse{}, errors.New("leaseID cannot be nil")
	}
	opts, modAccess := options.format()
	resp, err := clc.client.ReleaseLease(ctx, *clc.leaseID, opts, modAccess)
	return toContainerReleaseLeaseResponse(resp), handleError(err)
}
// RenewLease renews the container's previously-acquired lease.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container.
func (clc *ContainerLeaseClient) RenewLease(ctx context.Context, options *ContainerRenewLeaseOptions) (ContainerRenewLeaseResponse, error) {
	// A lease can only be renewed when the client tracks an acquired lease ID.
	if clc.leaseID == nil {
		return ContainerRenewLeaseResponse{}, errors.New("leaseID cannot be nil")
	}
	opts, modAccess := options.format()
	resp, err := clc.client.RenewLease(ctx, *clc.leaseID, opts, modAccess)
	if err == nil && resp.LeaseID != nil {
		// Track the lease ID the service reports back.
		clc.leaseID = resp.LeaseID
	}
	return toContainerRenewLeaseResponse(resp), handleError(err)
}

View file

@ -1,261 +0,0 @@
//go:build go1.18
// +build go1.18
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package azblob
import (
"context"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"io"
"net/url"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)
// PageBlobClient represents a client to an Azure Storage page blob.
type PageBlobClient struct {
	// BlobClient provides the operations shared by every blob type.
	BlobClient
	client *pageBlobClient // generated client used for page-blob-specific wire calls
}
// NewPageBlobClient creates a ServiceClient object using the specified URL, Azure AD credential, and options.
// Example of serviceURL: https://<your_storage_account>.blob.core.windows.net
func NewPageBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*PageBlobClient, error) {
	opts := getConnectionOptions(options)
	// Authenticate every (re)try of a request with a bearer token.
	opts.PerRetryPolicies = append(opts.PerRetryPolicies, runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil))
	conn := newConnection(blobURL, opts)
	return &PageBlobClient{
		client: newPageBlobClient(conn.Endpoint(), conn.Pipeline()),
		BlobClient: BlobClient{
			client: newBlobClient(conn.Endpoint(), conn.Pipeline()),
		},
	}, nil
}
// NewPageBlobClientWithNoCredential creates a ServiceClient object using the specified URL and options.
// Example of serviceURL: https://<your_storage_account>.blob.core.windows.net?<SAS token>
func NewPageBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*PageBlobClient, error) {
	// No auth policy is installed; the URL itself (e.g. a SAS) must authorize access.
	conn := newConnection(blobURL, getConnectionOptions(options))
	return &PageBlobClient{
		client: newPageBlobClient(conn.Endpoint(), conn.Pipeline()),
		BlobClient: BlobClient{
			client: newBlobClient(conn.Endpoint(), conn.Pipeline()),
		},
	}, nil
}
// NewPageBlobClientWithSharedKey creates a ServiceClient object using the specified URL, shared key, and options.
// Example of serviceURL: https://<your_storage_account>.blob.core.windows.net
func NewPageBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*PageBlobClient, error) {
	opts := getConnectionOptions(options)
	// Sign every (re)try of a request with the shared key.
	opts.PerRetryPolicies = append(opts.PerRetryPolicies, newSharedKeyCredPolicy(cred))
	conn := newConnection(blobURL, opts)
	return &PageBlobClient{
		client: newPageBlobClient(conn.Endpoint(), conn.Pipeline()),
		BlobClient: BlobClient{
			client:    newBlobClient(conn.Endpoint(), conn.Pipeline()),
			sharedKey: cred, // retained so SAS URLs can be generated later
		},
	}, nil
}
// WithSnapshot creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp.
// Pass "" to remove the snapshot returning a URL to the base blob.
func (pb *PageBlobClient) WithSnapshot(snapshot string) (*PageBlobClient, error) {
	parts, err := NewBlobURLParts(pb.URL())
	if err != nil {
		return nil, err
	}
	parts.Snapshot = snapshot
	// Rebuild the clients on the modified URL, reusing the existing pipeline.
	endpoint, pl := parts.URL(), pb.client.pl
	return &PageBlobClient{
		client: newPageBlobClient(endpoint, pl),
		BlobClient: BlobClient{
			client:    newBlobClient(endpoint, pl),
			sharedKey: pb.sharedKey,
		},
	}, nil
}
// WithVersionID creates a new PageBlobURL object identical to the source but with the specified version id.
// Pass "" to remove the version returning a URL to the base blob.
func (pb *PageBlobClient) WithVersionID(versionID string) (*PageBlobClient, error) {
	parts, err := NewBlobURLParts(pb.URL())
	if err != nil {
		return nil, err
	}
	parts.VersionID = versionID
	// Rebuild the clients on the modified URL, reusing the existing pipeline.
	endpoint, pl := parts.URL(), pb.client.pl
	return &PageBlobClient{
		client: newPageBlobClient(endpoint, pl),
		BlobClient: BlobClient{
			client:    newBlobClient(endpoint, pl),
			sharedKey: pb.sharedKey,
		},
	}, nil
}
// Create creates a page blob of the specified length. Call PutPage to upload data to a page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
func (pb *PageBlobClient) Create(ctx context.Context, size int64, o *PageBlobCreateOptions) (PageBlobCreateResponse, error) {
	opts, headers, leaseAccess, cpkInfo, cpkScope, modAccess := o.format()
	// contentLength is 0: Create only reserves capacity; page data is uploaded separately.
	resp, err := pb.client.Create(ctx, 0, size, opts, headers, leaseAccess, cpkInfo, cpkScope, modAccess)
	return toPageBlobCreateResponse(resp), handleError(err)
}
// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes.
// This method panics if the stream is not at position 0.
// Note that the http client closes the body stream after the request is sent to the service.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
func (pb *PageBlobClient) UploadPages(ctx context.Context, body io.ReadSeekCloser, options *PageBlobUploadPagesOptions) (PageBlobUploadPagesResponse, error) {
	// The service requires the exact byte count up front.
	count, err := validateSeekableStreamAt0AndGetCount(body)
	if err != nil {
		return PageBlobUploadPagesResponse{}, err
	}
	opts, leaseAccess, cpkInfo, cpkScope, seqAccess, modAccess := options.format()
	resp, err := pb.client.UploadPages(ctx, count, body, opts, leaseAccess,
		cpkInfo, cpkScope, seqAccess, modAccess)
	return toPageBlobUploadPagesResponse(resp), handleError(err)
}
// UploadPagesFromURL copies 1 or more pages from a source URL to the page blob.
// The sourceOffset specifies the start offset of source data to copy from.
// The destOffset specifies the start offset of data in page blob will be written to.
// The count must be a multiple of 512 bytes.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url.
func (pb *PageBlobClient) UploadPagesFromURL(ctx context.Context, source string, sourceOffset, destOffset, count int64,
	options *PageBlobUploadPagesFromURLOptions) (PageBlobUploadPagesFromURLResponse, error) {
	opts, cpk, cpkScope, lac, snac, mac, smac := options.format()
	srcRange := rangeToString(sourceOffset, count)
	dstRange := rangeToString(destOffset, count)
	// The 0 is the request content length — the copied bytes come from the source URL, not the request body.
	resp, err := pb.client.UploadPagesFromURL(ctx, source, srcRange, 0, dstRange, opts, cpk, cpkScope, lac,
		snac, mac, smac)
	return toPageBlobUploadPagesFromURLResponse(resp), handleError(err)
}
// ClearPages frees the specified pages from the page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page.
func (pb *PageBlobClient) ClearPages(ctx context.Context, pageRange HttpRange, options *PageBlobClearPagesOptions) (PageBlobClearPagesResponse, error) {
	lac, cpk, cpkScope, snac, mac := options.format()
	// Only the page range travels in the per-call options; a clear request carries no body (content length 0).
	clearOpts := &pageBlobClientClearPagesOptions{Range: pageRange.format()}
	resp, err := pb.client.ClearPages(ctx, 0, clearOpts, lac, cpk, cpkScope, snac, mac)
	return toPageBlobClearPagesResponse(resp), handleError(err)
}
// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
func (pb *PageBlobClient) GetPageRanges(options *PageBlobGetPageRangesOptions) *PageBlobGetPageRangesPager {
	getPageRangesOptions, leaseAccessConditions, modifiedAccessConditions := options.format()
	pageBlobGetPageRangesPager := pb.client.GetPageRanges(getPageRangesOptions, leaseAccessConditions, modifiedAccessConditions)
	// Replace the generated pager's advancer so each follow-up request carries the
	// continuation marker returned by the previous response.
	pageBlobGetPageRangesPager.advancer = func(ctx context.Context, response pageBlobClientGetPageRangesResponse) (*policy.Request, error) {
		// Thread the continuation token into the options used to build the next request.
		getPageRangesOptions.Marker = response.NextMarker
		req, err := pb.client.getPageRangesCreateRequest(ctx, getPageRangesOptions, leaseAccessConditions, modifiedAccessConditions)
		if err != nil {
			return nil, handleError(err)
		}
		// Round-trip the query string through ParseQuery/Encode.
		// NOTE(review): this only re-normalizes parameter ordering/escaping — confirm that is the intent.
		queryValues, err := url.ParseQuery(req.Raw().URL.RawQuery)
		if err != nil {
			return nil, handleError(err)
		}
		req.Raw().URL.RawQuery = queryValues.Encode()
		return req, nil
	}
	return toPageBlobGetPageRangesPager(pageBlobGetPageRangesPager)
}
// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges.
func (pb *PageBlobClient) GetPageRangesDiff(options *PageBlobGetPageRangesDiffOptions) *PageBlobGetPageRangesDiffPager {
	getPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions := options.format()
	getPageRangesDiffPager := pb.client.GetPageRangesDiff(getPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions)
	// Replace the generated pager's advancer so each follow-up request carries the
	// continuation marker returned by the previous response.
	getPageRangesDiffPager.advancer = func(ctx context.Context, response pageBlobClientGetPageRangesDiffResponse) (*policy.Request, error) {
		// Thread the continuation token into the options used to build the next request.
		getPageRangesDiffOptions.Marker = response.NextMarker
		req, err := pb.client.getPageRangesDiffCreateRequest(ctx, getPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions)
		if err != nil {
			return nil, handleError(err)
		}
		// Round-trip the query string through ParseQuery/Encode.
		// NOTE(review): this only re-normalizes parameter ordering/escaping — confirm that is the intent.
		queryValues, err := url.ParseQuery(req.Raw().URL.RawQuery)
		if err != nil {
			return nil, handleError(err)
		}
		req.Raw().URL.RawQuery = queryValues.Encode()
		return req, nil
	}
	return toPageBlobGetPageRangesDiffPager(getPageRangesDiffPager)
}
// Resize resizes the page blob to the specified size (which must be a multiple of 512).
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
func (pb *PageBlobClient) Resize(ctx context.Context, size int64, options *PageBlobResizeOptions) (PageBlobResizeResponse, error) {
	opts, lac, cpk, cpkScope, mac := options.format()
	resp, err := pb.client.Resize(ctx, size, opts, lac, cpk, cpkScope, mac)
	return toPageBlobResizeResponse(resp), handleError(err)
}
// UpdateSequenceNumber sets the page blob's sequence number.
func (pb *PageBlobClient) UpdateSequenceNumber(ctx context.Context, options *PageBlobUpdateSequenceNumberOptions) (PageBlobUpdateSequenceNumberResponse, error) {
	// options.format() yields the action type plus the formatted options and access conditions.
	action, opts, leaseConditions, modifiedConditions := options.format()
	resp, err := pb.client.UpdateSequenceNumber(ctx, *action, opts, leaseConditions, modifiedConditions)
	return toPageBlobUpdateSequenceNumberResponse(resp), handleError(err)
}
// StartCopyIncremental begins an operation to start an incremental copy from one page blob's snapshot to this page blob.
// The snapshot is copied such that only the differential changes between the previously copied snapshot are transferred to the destination.
// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and
// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots.
func (pb *PageBlobClient) StartCopyIncremental(ctx context.Context, copySource string, prevSnapshot string, options *PageBlobCopyIncrementalOptions) (PageBlobCopyIncrementalResponse, error) {
	srcURL, err := url.Parse(copySource)
	if err != nil {
		return PageBlobCopyIncrementalResponse{}, err
	}
	// Attach the snapshot query parameter identifying the previously copied snapshot.
	q := srcURL.Query()
	q.Set("snapshot", prevSnapshot)
	srcURL.RawQuery = q.Encode()
	opts, mac := options.format()
	resp, err := pb.client.CopyIncremental(ctx, srcURL.String(), opts, mac)
	return toPageBlobCopyIncrementalResponse(resp), handleError(err)
}

Some files were not shown because too many files have changed in this diff Show more