From 262ce77e2d8fba0d4c7187654b7fbd255e7e9e3d Mon Sep 17 00:00:00 2001 From: Zakhar Bessarab Date: Thu, 6 Oct 2022 00:10:00 +0300 Subject: [PATCH] lib/backup: add support of Azure Blob Storage (#460) * lib/backup: add support of Azure Blob Storage * lib/backup: add enterprise support of Azure Blob Storage --- app/vmbackup/README.md | 1 + go.mod | 3 + go.sum | 16 +- lib/backup/actions/util.go | 18 +- lib/backup/azremote/azblob.go | 405 +++ .../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 456 +++ .../azure-sdk-for-go/sdk/azcore/LICENSE.txt | 21 + .../azure-sdk-for-go/sdk/azcore/README.md | 39 + .../Azure/azure-sdk-for-go/sdk/azcore/ci.yml | 29 + .../sdk/azcore/cloud/cloud.go | 44 + .../azure-sdk-for-go/sdk/azcore/cloud/doc.go | 53 + .../Azure/azure-sdk-for-go/sdk/azcore/core.go | 75 + .../Azure/azure-sdk-for-go/sdk/azcore/doc.go | 257 ++ .../azure-sdk-for-go/sdk/azcore/errors.go | 14 + .../Azure/azure-sdk-for-go/sdk/azcore/etag.go | 48 + .../sdk/azcore/internal/exported/exported.go | 61 + .../sdk/azcore/internal/exported/pipeline.go | 97 + .../sdk/azcore/internal/exported/request.go | 156 + .../internal/exported/response_error.go | 142 + .../sdk/azcore/internal/log/log.go | 38 + .../azcore/internal/pollers/async/async.go | 147 + .../sdk/azcore/internal/pollers/body/body.go | 130 + .../sdk/azcore/internal/pollers/loc/loc.go | 111 + .../sdk/azcore/internal/pollers/op/op.go | 140 + .../sdk/azcore/internal/pollers/poller.go | 24 + .../sdk/azcore/internal/pollers/util.go | 317 ++ .../sdk/azcore/internal/shared/constants.go | 34 + .../sdk/azcore/internal/shared/shared.go | 135 + .../azure-sdk-for-go/sdk/azcore/log/doc.go | 10 + .../azure-sdk-for-go/sdk/azcore/log/log.go | 50 + .../azure-sdk-for-go/sdk/azcore/policy/doc.go | 10 + .../sdk/azcore/policy/policy.go | 119 + .../sdk/azcore/runtime/doc.go | 10 + .../sdk/azcore/runtime/errors.go | 19 + .../sdk/azcore/runtime/pager.go | 77 + .../sdk/azcore/runtime/pipeline.go | 73 + .../sdk/azcore/runtime/policy_bearer_token.go | 64 + .../azcore/runtime/policy_body_download.go | 73 + .../sdk/azcore/runtime/policy_http_header.go | 39 + .../azcore/runtime/policy_include_response.go | 34 + .../sdk/azcore/runtime/policy_logging.go | 251 ++ .../sdk/azcore/runtime/policy_request_id.go | 34 + .../sdk/azcore/runtime/policy_retry.go | 242 ++ .../sdk/azcore/runtime/policy_telemetry.go | 79 + .../sdk/azcore/runtime/poller.go | 324 ++ .../sdk/azcore/runtime/request.go | 225 ++ .../sdk/azcore/runtime/response.go | 137 + .../runtime/transport_default_http_client.go | 37 + .../sdk/azcore/streaming/doc.go | 9 + .../sdk/azcore/streaming/progress.go | 72 + .../azure-sdk-for-go/sdk/azcore/to/doc.go | 9 + .../azure-sdk-for-go/sdk/azcore/to/to.go | 21 + .../azure-sdk-for-go/sdk/internal/LICENSE.txt | 21 + .../sdk/internal/diag/diag.go | 51 + .../azure-sdk-for-go/sdk/internal/diag/doc.go | 7 + .../sdk/internal/errorinfo/doc.go | 7 + .../sdk/internal/errorinfo/errorinfo.go | 16 + .../azure-sdk-for-go/sdk/internal/log/doc.go | 7 + .../azure-sdk-for-go/sdk/internal/log/log.go | 104 + .../sdk/internal/temporal/resource.go | 120 + .../azure-sdk-for-go/sdk/internal/uuid/doc.go | 7 + .../sdk/internal/uuid/uuid.go | 76 + .../sdk/storage/azblob/CHANGELOG.md | 54 + .../sdk/storage/azblob/LICENSE.txt | 21 + .../sdk/storage/azblob/README.md | 397 +++ .../sdk/storage/azblob/autorest.md | 171 + .../sdk/storage/azblob/bytes_writer.go | 30 + .../sdk/storage/azblob/chunkwriting.go | 231 ++ .../sdk/storage/azblob/ci.yml | 28 + .../sdk/storage/azblob/connection.go | 39 + 
.../sdk/storage/azblob/constants.go | 46 + .../sdk/storage/azblob/doc.go | 214 ++ .../sdk/storage/azblob/highlevel.go | 316 ++ .../sdk/storage/azblob/internal/zc_shared.go | 150 + .../sdk/storage/azblob/section_writer.go | 53 + .../sdk/storage/azblob/transfer_manager.go | 154 + .../sdk/storage/azblob/zc_access_policy.go | 67 + .../storage/azblob/zc_append_blob_client.go | 154 + .../sdk/storage/azblob/zc_blob_client.go | 278 ++ .../storage/azblob/zc_blob_lease_client.go | 98 + .../storage/azblob/zc_block_blob_client.go | 201 ++ .../storage/azblob/zc_connection_string.go | 88 + .../sdk/storage/azblob/zc_container_client.go | 253 ++ .../azblob/zc_container_lease_client.go | 102 + .../sdk/storage/azblob/zc_page_blob_client.go | 261 ++ .../sdk/storage/azblob/zc_parsing_urls.go | 184 ++ .../sdk/storage/azblob/zc_response_error.go | 17 + .../sdk/storage/azblob/zc_response_helpers.go | 35 + .../sdk/storage/azblob/zc_retry_reader.go | 194 ++ .../sdk/storage/azblob/zc_sas_account.go | 243 ++ .../sdk/storage/azblob/zc_sas_query_params.go | 427 +++ .../sdk/storage/azblob/zc_sas_service.go | 365 +++ .../sdk/storage/azblob/zc_service_client.go | 266 ++ .../zc_shared_policy_shared_key_credential.go | 197 ++ .../sdk/storage/azblob/zc_storage_error.go | 236 ++ .../sdk/storage/azblob/zc_validators.go | 107 + .../storage/azblob/zm_access_conditions.go | 43 + .../azblob/zm_append_blob_client_util.go | 184 ++ .../sdk/storage/azblob/zm_blob_client_util.go | 478 +++ .../azblob/zm_blob_lease_client_util.go | 160 + .../azblob/zm_block_blob_client_util.go | 272 ++ .../sdk/storage/azblob/zm_client_util.go | 55 + .../azblob/zm_container_client_util.go | 271 ++ .../azblob/zm_container_lease_client_util.go | 166 + .../sdk/storage/azblob/zm_highlevel_util.go | 201 ++ .../azblob/zm_page_blob_client_util.go | 402 +++ .../zm_serialize_and_desearilize_util.go | 68 + .../storage/azblob/zm_service_client_util.go | 226 ++ .../azblob/zz_generated_appendblob_client.go | 648 ++++ .../azblob/zz_generated_blob_client.go | 2831 +++++++++++++++++ .../azblob/zz_generated_blockblob_client.go | 953 ++++++ .../storage/azblob/zz_generated_constants.go | 841 +++++ .../azblob/zz_generated_container_client.go | 1442 +++++++++ .../sdk/storage/azblob/zz_generated_models.go | 2158 +++++++++++++ .../azblob/zz_generated_pageblob_client.go | 1247 ++++++++ .../sdk/storage/azblob/zz_generated_pagers.go | 287 ++ .../azblob/zz_generated_response_types.go | 2434 ++++++++++++++ .../azblob/zz_generated_service_client.go | 551 ++++ .../azblob/zz_generated_time_rfc1123.go | 42 + .../azblob/zz_generated_time_rfc3339.go | 58 + .../storage/azblob/zz_generated_xml_helper.go | 40 + vendor/modules.txt | 28 + 122 files changed, 27206 insertions(+), 2 deletions(-) create mode 100644 lib/backup/azremote/azblob.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go create mode 100644 
vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/LICENSE.txt create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/autorest.md create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bytes_writer.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/chunkwriting.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/connection.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/highlevel.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/zc_shared.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/section_writer.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/transfer_manager.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_access_policy.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_append_blob_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_lease_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_block_blob_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_connection_string.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_lease_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_page_blob_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_parsing_urls.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_error.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_helpers.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_retry_reader.go 
create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_account.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_query_params.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_service.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_service_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_shared_policy_shared_key_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_storage_error.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_validators.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_access_conditions.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_append_blob_client_util.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_client_util.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_lease_client_util.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_block_blob_client_util.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_client_util.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_client_util.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_lease_client_util.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_highlevel_util.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_page_blob_client_util.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_serialize_and_desearilize_util.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_service_client_util.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_appendblob_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blob_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blockblob_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_container_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pageblob_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pagers.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_response_types.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_service_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc1123.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc3339.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_xml_helper.go diff --git a/app/vmbackup/README.md b/app/vmbackup/README.md index d298ec56b..ffe6ee64d 100644 --- a/app/vmbackup/README.md +++ b/app/vmbackup/README.md @@ -6,6 
+6,7 @@ Supported storage systems for backups: * [GCS](https://cloud.google.com/storage/). Example: `gs:///` * [S3](https://aws.amazon.com/s3/). Example: `s3:///` +* [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs/). Example: `azblob:///` * Any S3-compatible storage such as [MinIO](https://github.com/minio/minio), [Ceph](https://docs.ceph.com/en/pacific/radosgw/s3/) or [Swift](https://platform.swiftstack.com/docs/admin/middleware/s3_middleware.html). See [these docs](#advanced-usage) for details. * Local filesystem. Example: `fs://`. Note that `vmbackup` prevents from storing the backup into the directory pointed by `-storageDataPath` command-line flag, since this directory should be managed solely by VictoriaMetrics or `vmstorage`. diff --git a/go.mod b/go.mod index 111239351..abb181b36 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ go 1.19 require ( cloud.google.com/go/storage v1.27.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 github.com/VictoriaMetrics/fastcache v1.12.0 // Do not use the original github.com/valyala/fasthttp because of issues @@ -39,6 +40,8 @@ require ( cloud.google.com/go v0.104.0 // indirect cloud.google.com/go/compute v1.10.0 // indirect cloud.google.com/go/iam v0.5.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 // indirect github.com/VividCortex/ewma v1.2.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.8 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.12.21 // indirect diff --git a/go.sum b/go.sum index e68e8c471..39ad4223e 100644 --- a/go.sum +++ b/go.sum @@ -66,7 +66,15 @@ cloud.google.com/go/storage v1.27.0 h1:YOO045NZI9RKfCj1c5A/ZtuuENUc8OAW+gHdGnDgy cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go v48.2.0+incompatible h1:+t2P1j1r5N6lYgPiiz7ZbEVZFkWjVe9WhHbMm0gg8hw= github.com/Azure/azure-sdk-for-go v48.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0 h1:sVPhtT2qjO86rTUaWMr4WoES4TkjGnzcioXcnHV9s5k= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.0.0 h1:Yoicul8bnVdQrhDMTHxdEckRGX01XvwXDHUT9zYZ3k0= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 h1:jp0dGvZ7ZK0mgqnTSClMxa5xuRL7NZgHameVYF6BurY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 h1:QSdcrd/UFJv6Bp/CfoVf2SrENpFn9P6Yh8yb+xNhYMM= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1/go.mod h1:eZ4g6GUvXiGulfIbbhh1Xr4XwUYaYaWMqzGD/284wCA= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= @@ -88,6 +96,7 @@ github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6L github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= 
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/AzureAD/microsoft-authentication-library-for-go v0.4.0 h1:WVsrXCnHlDDX8ls+tootqRE87/hL9S/g4ewig9RsD/c= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= @@ -241,6 +250,7 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/digitalocean/godo v1.52.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU= +github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= @@ -412,6 +422,7 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -632,6 +643,7 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= @@ -749,6 +761,7 @@ github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHu github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/errors v0.8.0/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -861,8 +874,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -954,6 +967,7 @@ golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20220511200225-c6db032c6c88 h1:Tgea0cVUD0ivh5ADBX4WwuI12DUd2to3nCYe2eayMIw= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= diff --git a/lib/backup/actions/util.go b/lib/backup/actions/util.go index 7150074a4..a49cd9c53 100644 --- a/lib/backup/actions/util.go +++ b/lib/backup/actions/util.go @@ -7,6 +7,7 @@ import ( "sync" "time" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/azremote" "github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/common" "github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/fsremote" "github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/gcsremote" @@ -183,7 +184,7 @@ func NewRemoteFS(path string) (common.RemoteFS, error) { } n := strings.Index(path, "://") if n < 0 { - return nil, fmt.Errorf("Missing scheme in path %q. Supported schemes: `gs://`, `s3://`, `fs://`", path) + return nil, fmt.Errorf("Missing scheme in path %q. 
Supported schemes: `gs://`, `s3://`, `azblob://`, `fs://`", path)
 	}
 	scheme := path[:n]
 	dir := path[n+len("://"):]
@@ -212,6 +213,21 @@ func NewRemoteFS(path string) (common.RemoteFS, error) {
 			return nil, fmt.Errorf("cannot initialize connection to gcs: %w", err)
 		}
 		return fs, nil
+	case "azblob":
+		n := strings.Index(dir, "/")
+		if n < 0 {
+			return nil, fmt.Errorf("missing directory on the AZBlob container %q", dir)
+		}
+		bucket := dir[:n]
+		dir = dir[n:]
+		fs := &azremote.FS{
+			Container: bucket,
+			Dir:       dir,
+		}
+		if err := fs.Init(); err != nil {
+			return nil, fmt.Errorf("cannot initialize connection to AZBlob: %w", err)
+		}
+		return fs, nil
 	case "s3":
 		n := strings.Index(dir, "/")
 		if n < 0 {
diff --git a/lib/backup/azremote/azblob.go b/lib/backup/azremote/azblob.go
new file mode 100644
index 000000000..1d3260e00
--- /dev/null
+++ b/lib/backup/azremote/azblob.go
@@ -0,0 +1,405 @@
+package azremote
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/common"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/fscommon"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+)
+
+const (
+	envStorageAcctName = "AZURE_STORAGE_ACCOUNT_NAME"
+	envStorageAccKey   = "AZURE_STORAGE_ACCOUNT_KEY"
+	envStorageAccCs    = "AZURE_STORAGE_ACCOUNT_CONNECTION_STRING"
+)
+
+// FS represents filesystem for backups in Azure Blob Storage.
+//
+// Init must be called before calling other FS methods.
+type FS struct {
+	// Azure Blob Storage container to use.
+	Container string
+
+	// Directory in the container to write to.
+	Dir string
+
+	client *azblob.ContainerClient
+}
+
+// Init initializes fs.
+//
+// The returned fs must be stopped when no longer needed with MustStop call.
+func (fs *FS) Init() error {
+	if fs.client != nil {
+		logger.Panicf("BUG: fs.Init has been already called")
+	}
+
+	for strings.HasPrefix(fs.Dir, "/") {
+		fs.Dir = fs.Dir[1:]
+	}
+	if !strings.HasSuffix(fs.Dir, "/") {
+		fs.Dir += "/"
+	}
+
+	var sc *azblob.ServiceClient
+	var err error
+	if cs, ok := os.LookupEnv(envStorageAccCs); ok {
+		sc, err = azblob.NewServiceClientFromConnectionString(cs, nil)
+		if err != nil {
+			return fmt.Errorf("failed to create AZBlob service client from connection string: %w", err)
+		}
+	}
+
+	accountName, ok1 := os.LookupEnv(envStorageAcctName)
+	accountKey, ok2 := os.LookupEnv(envStorageAccKey)
+	if ok1 && ok2 {
+		creds, err := azblob.NewSharedKeyCredential(accountName, accountKey)
+		if err != nil {
+			return fmt.Errorf("failed to create AZBlob credentials from account name and key: %w", err)
+		}
+		serviceURL := fmt.Sprintf("https://%s.blob.core.windows.net/", accountName)
+
+		sc, err = azblob.NewServiceClientWithSharedKey(serviceURL, creds, nil)
+		if err != nil {
+			return fmt.Errorf("failed to create AZBlob service client from account name and key: %w", err)
+		}
+	}
+
+	if sc == nil {
+		return fmt.Errorf(`failed to detect any credentials type for AZBlob. Ensure a connection string is set at %q, or a shared key is set at %q and %q`, envStorageAccCs, envStorageAcctName, envStorageAccKey)
+	}
+
+	containerClient, err := sc.NewContainerClient(fs.Container)
+	if err != nil {
+		return fmt.Errorf("failed to create AZBlob container client: %w", err)
+	}
+
+	fs.client = containerClient
+
+	return nil
+}
+
+// MustStop stops fs.
+func (fs *FS) MustStop() {
+	fs.client = nil
+}
+
+// String returns human-readable description for fs.
+func (fs *FS) String() string {
+	return fmt.Sprintf("AZBlob{container: %q, dir: %q}", fs.Container, fs.Dir)
+}
+
+// ListParts returns all the parts for fs.
+func (fs *FS) ListParts() ([]common.Part, error) {
+	dir := fs.Dir
+	ctx := context.Background()
+
+	opts := &azblob.ContainerListBlobsFlatOptions{
+		Prefix: &dir,
+	}
+
+	pager := fs.client.ListBlobsFlat(opts)
+	var parts []common.Part
+	for pager.NextPage(ctx) {
+		resp := pager.PageResponse()
+
+		for _, v := range resp.Segment.BlobItems {
+			file := *v.Name
+			if !strings.HasPrefix(file, dir) {
+				return nil, fmt.Errorf("unexpected prefix for AZBlob key %q; want %q", file, dir)
+			}
+			if fscommon.IgnorePath(file) {
+				continue
+			}
+			var p common.Part
+			if !p.ParseFromRemotePath(file[len(dir):]) {
+				logger.Infof("skipping unknown object %q", file)
+				continue
+			}
+
+			p.ActualSize = uint64(*v.Properties.ContentLength)
+			parts = append(parts, p)
+		}
+
+	}
+
+	if err := pager.Err(); err != nil {
+		return nil, fmt.Errorf("error when iterating objects at %q: %w", dir, err)
+	}
+
+	return parts, nil
+}
+
+// DeletePart deletes part p from fs.
+func (fs *FS) DeletePart(p common.Part) error {
+	bc, err := fs.clientForPart(p)
+	if err != nil {
+		return err
+	}
+	ctx := context.Background()
+	if _, err := bc.Delete(ctx, &azblob.BlobDeleteOptions{}); err != nil {
+		return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", p.Path, fs, bc.URL(), err)
+	}
+	return nil
+}
+
+// RemoveEmptyDirs recursively removes empty dirs in fs.
+func (fs *FS) RemoveEmptyDirs() error {
+	// Blob storage has no directories, so nothing to remove.
+	return nil
+}
+
+// CopyPart copies p from srcFS to fs.
+func (fs *FS) CopyPart(srcFS common.OriginFS, p common.Part) error {
+	src, ok := srcFS.(*FS)
+	if !ok {
+		return fmt.Errorf("cannot perform server-side copying from %s to %s: both of them must be AZBlob", srcFS, fs)
+	}
+
+	sbc, err := src.clientForPart(p)
+	if err != nil {
+		return fmt.Errorf("failed to initialize server-side copy of src %q: %w", p.Path, err)
+	}
+	dbc, err := fs.clientForPart(p)
+	if err != nil {
+		return fmt.Errorf("failed to initialize server-side copy of dst %q: %w", p.Path, err)
+	}
+
+	ssCopyPermission := azblob.BlobSASPermissions{
+		Read:   true,
+		Create: true,
+		Write:  true,
+	}
+	t, err := sbc.GetSASToken(ssCopyPermission, time.Now(), time.Now().Add(30*time.Minute))
+	if err != nil {
+		return fmt.Errorf("failed to generate SAS token of src %q: %w", p.Path, err)
+	}
+
+	srcURL := sbc.URL() + "?" + t.Encode()
+
+	ctx := context.Background()
+	_, err = dbc.CopyFromURL(ctx, srcURL, &azblob.BlockBlobCopyFromURLOptions{})
+	if err != nil {
+		return fmt.Errorf("cannot copy %q from %s to %s: %w", p.Path, src, fs, err)
+	}
+
+	return nil
+}
+
+// DownloadPart downloads part p from fs to w.
+func (fs *FS) DownloadPart(p common.Part, w io.Writer) error {
+	bc, err := fs.clientForPart(p)
+	if err != nil {
+		return err
+	}
+
+	ctx := context.Background()
+	r, err := bc.Download(ctx, &azblob.BlobDownloadOptions{})
+	if err != nil {
+		return fmt.Errorf("cannot open reader for %q at %s (remote path %q): %w", p.Path, fs, bc.URL(), err)
+	}
+
+	body := r.Body(&azblob.RetryReaderOptions{})
+	n, err := io.Copy(w, body)
+	if err1 := body.Close(); err1 != nil && err == nil {
+		err = err1
+	}
+	if err != nil {
+		return fmt.Errorf("cannot download %q at %s (remote path %q): %w", p.Path, fs, bc.URL(), err)
+	}
+	if uint64(n) != p.Size {
+		return fmt.Errorf("wrong data size downloaded from %q at %s; got %d bytes; want %d bytes", p.Path, fs, n, p.Size)
+	}
+	return nil
+}
+
+// UploadPart uploads part p from r to fs.
+func (fs *FS) UploadPart(p common.Part, r io.Reader) error {
+	bc, err := fs.clientForPart(p)
+	if err != nil {
+		return err
+	}
+
+	ctx := context.Background()
+	_, err = bc.UploadStream(ctx, r, azblob.UploadStreamOptions{})
+
+	if err != nil {
+		return fmt.Errorf("cannot upload data to %q at %s (remote path %q): %w", p.Path, fs, bc.URL(), err)
+	}
+
+	return nil
+}
+
+func (fs *FS) clientForPart(p common.Part) (*azblob.BlockBlobClient, error) {
+	path := p.RemotePath(fs.Dir)
+
+	return fs.clientForPath(path)
+}
+
+func (fs *FS) clientForPath(path string) (*azblob.BlockBlobClient, error) {
+	bc, err := fs.client.NewBlockBlobClient(path)
+	if err != nil {
+		return nil, fmt.Errorf("unexpected error when creating client for blob %q: %w", path, err)
+	}
+
+	return bc, nil
+}
+
+// DeleteFile deletes filePath at fs if it exists.
+//
+// The function does nothing if the filePath doesn't exist.
+func (fs *FS) DeleteFile(filePath string) error {
+	v, err := fs.HasFile(filePath)
+	if err != nil {
+		return err
+	}
+	if !v {
+		return nil
+	}
+
+	path := fs.Dir + filePath
+	bc, err := fs.clientForPath(path)
+	if err != nil {
+		return err
+	}
+
+	ctx := context.Background()
+	if _, err := bc.Delete(ctx, nil); err != nil {
+		return fmt.Errorf("cannot delete %q at %s (remote path %q): %w", filePath, fs, bc.URL(), err)
+	}
+	return nil
+}
+
+// CreateFile creates filePath at fs and puts data into it.
+//
+// The file is overwritten if it exists.
+func (fs *FS) CreateFile(filePath string, data []byte) error {
+	path := fs.Dir + filePath
+	bc, err := fs.clientForPath(path)
+	if err != nil {
+		return err
+	}
+
+	ctx := context.Background()
+	r, err := bc.UploadBuffer(ctx, data, azblob.UploadOption{
+		Parallelism: 1,
+	})
+	if err != nil {
+		return fmt.Errorf("cannot upload %d bytes to %q at %s (remote path %q): %w", len(data), filePath, fs, bc.URL(), err)
+	}
+
+	defer func() { _ = r.Body.Close() }()
+
+	return nil
+}
+
+// HasFile returns true if filePath exists at fs.
+func (fs *FS) HasFile(filePath string) (bool, error) {
+	path := fs.Dir + filePath
+
+	bc, err := fs.clientForPath(path)
+	if err != nil {
+		return false, err
+	}
+
+	ctx := context.Background()
+	_, err = bc.GetProperties(ctx, nil)
+
+	var azerr *azblob.InternalError
+	var sterr *azblob.StorageError
+
+	if errors.As(err, &azerr) && azerr.As(&sterr) {
+		if sterr.ErrorCode == azblob.StorageErrorCodeBlobNotFound {
+			return false, nil
+		}
+		return false, fmt.Errorf("unexpected error when obtaining properties for %q at %s (remote path %q): %w", filePath, fs, bc.URL(), err)
+	}
+
+	return true, nil
+}
+
+// ListDirs returns the list of subdirectories in the given directory.
+func (fs *FS) ListDirs(subpath string) ([]string, error) {
+	path := strings.TrimPrefix(filepath.Join(fs.Dir, subpath), "/")
+	if path != "" && !strings.HasSuffix(path, "/") {
+		path += "/"
+	}
+
+	var dirs []string
+
+	const dirsDelimiter = "/"
+	pager := fs.client.ListBlobsHierarchy(dirsDelimiter, &azblob.ContainerListBlobsHierarchyOptions{
+		Prefix: &path,
+	})
+	ctx := context.Background()
+	for pager.NextPage(ctx) {
+		resp := pager.PageResponse()
+
+		const dirsDelimiter = "/"
+
+		for _, v := range resp.Segment.BlobPrefixes {
+			dir := *v.Name
+			if !strings.HasPrefix(dir, path) {
+				return nil, fmt.Errorf("unexpected prefix for AZBlob key %q; want %q", dir, path)
+			}
+			dir = strings.TrimPrefix(dir, path)
+			if fscommon.IgnorePath(dir) || !strings.Contains(dir, dirsDelimiter) {
+				continue
+			}
+			dirs = append(dirs, strings.TrimSuffix(dir, dirsDelimiter))
+		}
+	}
+
+	return dirs, nil
+}
+
+// DeleteFiles deletes files at fs.
+//
+// The function does nothing if the files don't exist.
+func (fs *FS) DeleteFiles(filePaths []string) error {
+	if len(filePaths) == 0 {
+		return nil
+	}
+	for _, filePath := range filePaths {
+		path := filePath
+		if fs.Dir != "/" {
+			path = filepath.Join(fs.Dir, path)
+		}
+
+		ctx := context.Background()
+
+		opts := &azblob.ContainerListBlobsFlatOptions{
+			Prefix: &path,
+		}
+
+		pager := fs.client.ListBlobsFlat(opts)
+		for pager.NextPage(ctx) {
+			resp := pager.PageResponse()
+
+			for _, v := range resp.Segment.BlobItems {
+				file := *v.Name
+
+				bc, err := fs.clientForPath(file)
+				if err != nil {
+					return err
+				}
+
+				_, err = bc.Delete(ctx, &azblob.BlobDeleteOptions{})
+				if err != nil {
+					return fmt.Errorf("cannot delete %q at %s (remote dir %q): %w", file, fs, fs.Dir, err)
+				}
+			}
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md new file mode 100644 index 000000000..6e961fafd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -0,0 +1,456 @@ +# Release History + +## 1.0.0 (2022-05-12) + +### Features Added +* Added interface `runtime.PollingHandler` to support custom poller implementations. + * Added field `PollingHandler` of this type to `runtime.NewPollerOptions[T]` and `runtime.NewPollerFromResumeTokenOptions[T]`.
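For reference, a minimal sketch of how the new `azremote.FS` from `lib/backup/azremote/azblob.go` above can be exercised directly, assuming the shared-key environment variables defined in that file are exported; the container name, directory and file name below are placeholders, and `vmbackup` itself constructs this FS via `NewRemoteFS` when given an `azblob://<container>/<path>` destination:

```go
package main

import (
	"log"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/azremote"
)

func main() {
	// Assumes AZURE_STORAGE_ACCOUNT_NAME and AZURE_STORAGE_ACCOUNT_KEY
	// (or AZURE_STORAGE_ACCOUNT_CONNECTION_STRING) are set in the environment.
	// Container and directory names are illustrative only.
	fs := &azremote.FS{
		Container: "backups",
		Dir:       "victoriametrics/daily",
	}
	if err := fs.Init(); err != nil {
		log.Fatalf("cannot initialize AZBlob remote fs: %s", err)
	}
	defer fs.MustStop()

	// Store a small file and enumerate backup parts under fs.Dir.
	if err := fs.CreateFile("marker.txt", []byte("backup marker")); err != nil {
		log.Fatalf("cannot create file: %s", err)
	}
	parts, err := fs.ListParts()
	if err != nil {
		log.Fatalf("cannot list parts: %s", err)
	}
	log.Printf("found %d parts at %s", len(parts), fs)
}
```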
+ +### Breaking Changes +* Renamed `cloud.Configuration.LoginEndpoint` to `.ActiveDirectoryAuthorityHost` +* Renamed `cloud.AzurePublicCloud` to `cloud.AzurePublic` +* Removed `AuxiliaryTenants` field from `arm/ClientOptions` and `arm/policy/BearerTokenOptions` +* Removed `TokenRequestOptions.TenantID` +* `Poller[T].PollUntilDone()` now takes an `options *PollUntilDoneOptions` param instead of `freq time.Duration` +* Removed `arm/runtime.Poller[T]`, `arm/runtime.NewPoller[T]()` and `arm/runtime.NewPollerFromResumeToken[T]()` +* Removed `arm/runtime.FinalStateVia` and related `const` values +* Renamed `runtime.PageProcessor` to `runtime.PagingHandler` +* The `arm/runtime.ProviderRepsonse` and `arm/runtime.Provider` types are no longer exported. +* Renamed `NewRequestIdPolicy()` to `NewRequestIDPolicy()` +* `TokenCredential.GetToken` now returns `AccessToken` by value. + +### Bugs Fixed +* When per-try timeouts are enabled, only cancel the context after the body has been read and closed. +* The `Operation-Location` poller now properly handles `final-state-via` values. +* Improvements in `runtime.Poller[T]` + * `Poll()` shouldn't cache errors, allowing for additional retries when in a non-terminal state. + * `Result()` will cache the terminal result or error but not transient errors, allowing for additional retries. + +### Other Changes +* Updated to latest `internal` module and absorbed breaking changes. + * Use `temporal.Resource` and deleted copy. +* The internal poller implementation has been refactored. + * The implementation in `internal/pollers/poller.go` has been merged into `runtime/poller.go` with some slight modification. + * The internal poller types had their methods updated to conform to the `runtime.PollingHandler` interface. + * The creation of resume tokens has been refactored so that implementers of `runtime.PollingHandler` don't need to know about it. +* `NewPipeline()` places policies from `ClientOptions` after policies from `PipelineOptions` +* Default User-Agent headers no longer include `azcore` version information + +## 0.23.1 (2022-04-14) + +### Bugs Fixed +* Include XML header when marshalling XML content. +* Handle XML namespaces when searching for error code. +* Handle `odata.error` when searching for error code. + +## 0.23.0 (2022-04-04) + +### Features Added +* Added `runtime.Pager[T any]` and `runtime.Poller[T any]` supporting types for central, generic, implementations. +* Added `cloud` package with a new API for cloud configuration +* Added `FinalStateVia` field to `runtime.NewPollerOptions[T any]` type. + +### Breaking Changes +* Removed the `Poller` type-alias to the internal poller implementation. +* Added `Ptr[T any]` and `SliceOfPtrs[T any]` in the `to` package and removed all non-generic implementations. +* `NullValue` and `IsNullValue` now take a generic type parameter instead of an interface func parameter. +* Replaced `arm.Endpoint` with `cloud` API + * Removed the `endpoint` parameter from `NewRPRegistrationPolicy()` + * `arm/runtime.NewPipeline()` and `.NewRPRegistrationPolicy()` now return an `error` +* Refactored `NewPoller` and `NewPollerFromResumeToken` funcs in `arm/runtime` and `runtime` packages. + * Removed the `pollerID` parameter as it's no longer required. + * Created optional parameter structs and moved optional parameters into them. +* Changed `FinalStateVia` field to a `const` type. + +### Other Changes +* Converted expiring resource and dependent types to use generics. 
+ +## 0.22.0 (2022-03-03) + +### Features Added +* Added header `WWW-Authenticate` to the default allow-list of headers for logging. +* Added a pipeline policy that enables the retrieval of HTTP responses from API calls. + * Added `runtime.WithCaptureResponse` to enable the policy at the API level (off by default). + +### Breaking Changes +* Moved `WithHTTPHeader` and `WithRetryOptions` from the `policy` package to the `runtime` package. + +## 0.21.1 (2022-02-04) + +### Bugs Fixed +* Restore response body after reading in `Poller.FinalResponse()`. (#16911) +* Fixed bug in `NullValue` that could lead to incorrect comparisons for empty maps/slices (#16969) + +### Other Changes +* `BearerTokenPolicy` is more resilient to transient authentication failures. (#16789) + +## 0.21.0 (2022-01-11) + +### Features Added +* Added `AllowedHeaders` and `AllowedQueryParams` to `policy.LogOptions` to control which headers and query parameters are written to the logger. +* Added `azcore.ResponseError` type which is returned from APIs when a non-success HTTP status code is received. + +### Breaking Changes +* Moved `[]policy.Policy` parameters of `arm/runtime.NewPipeline` and `runtime.NewPipeline` into a new struct, `runtime.PipelineOptions` +* Renamed `arm/ClientOptions.Host` to `.Endpoint` +* Moved `Request.SkipBodyDownload` method to function `runtime.SkipBodyDownload` +* Removed `azcore.HTTPResponse` interface type +* `arm.NewPoller()` and `runtime.NewPoller()` no longer require an `eu` parameter +* `runtime.NewResponseError()` no longer requires an `error` parameter + +## 0.20.0 (2021-10-22) + +### Breaking Changes +* Removed `arm.Connection` +* Removed `azcore.Credential` and `.NewAnonymousCredential()` + * `NewRPRegistrationPolicy` now requires an `azcore.TokenCredential` +* `runtime.NewPipeline` has a new signature that simplifies implementing custom authentication +* `arm/runtime.RegistrationOptions` embeds `policy.ClientOptions` +* Contents in the `log` package have been slightly renamed. +* Removed `AuthenticationOptions` in favor of `policy.BearerTokenOptions` +* Changed parameters for `NewBearerTokenPolicy()` +* Moved policy config options out of `arm/runtime` and into `arm/policy` + +### Features Added +* Updating Documentation +* Added string typdef `arm.Endpoint` to provide a hint toward expected ARM client endpoints +* `azcore.ClientOptions` contains common pipeline configuration settings +* Added support for multi-tenant authorization in `arm/runtime` +* Require one second minimum when calling `PollUntilDone()` + +### Bug Fixes +* Fixed a potential panic when creating the default Transporter. +* Close LRO initial response body when creating a poller. +* Fixed a panic when recursively cloning structs that contain time.Time. + +## 0.19.0 (2021-08-25) + +### Breaking Changes +* Split content out of `azcore` into various packages. The intent is to separate content based on its usage (common, uncommon, SDK authors). + * `azcore` has all core functionality. + * `log` contains facilities for configuring in-box logging. + * `policy` is used for configuring pipeline options and creating custom pipeline policies. + * `runtime` contains various helpers used by SDK authors and generated content. + * `streaming` has helpers for streaming IO operations. +* `NewTelemetryPolicy()` now requires module and version parameters and the `Value` option has been removed. + * As a result, the `Request.Telemetry()` method has been removed. 
+* The telemetry policy now includes the SDK prefix `azsdk-go-` so callers no longer need to provide it. +* The `*http.Request` in `runtime.Request` is no longer anonymously embedded. Use the `Raw()` method to access it. +* The `UserAgent` and `Version` constants have been made internal, `Module` and `Version` respectively. + +### Bug Fixes +* Fixed an issue in the retry policy where the request body could be overwritten after a rewind. + +### Other Changes +* Moved modules `armcore` and `to` content into `arm` and `to` packages respectively. + * The `Pipeline()` method on `armcore.Connection` has been replaced by `NewPipeline()` in `arm.Connection`. It takes module and version parameters used by the telemetry policy. +* Poller logic has been consolidated across ARM and core implementations. + * This required some changes to the internal interfaces for core pollers. +* The core poller types have been improved, including more logging and test coverage. + +## 0.18.1 (2021-08-20) + +### Features Added +* Adds an `ETag` type for comparing etags and handling etags on requests +* Simplifies the `requestBodyProgess` and `responseBodyProgress` into a single `progress` object + +### Bugs Fixed +* `JoinPaths` will preserve query parameters encoded in the `root` url. + +### Other Changes +* Bumps dependency on `internal` module to the latest version (v0.7.0) + +## 0.18.0 (2021-07-29) +### Features Added +* Replaces methods from Logger type with two package methods for interacting with the logging functionality. +* `azcore.SetClassifications` replaces `azcore.Logger().SetClassifications` +* `azcore.SetListener` replaces `azcore.Logger().SetListener` + +### Breaking Changes +* Removes `Logger` type from `azcore` + + +## 0.17.0 (2021-07-27) +### Features Added +* Adding TenantID to TokenRequestOptions (https://github.com/Azure/azure-sdk-for-go/pull/14879) +* Adding AuxiliaryTenants to AuthenticationOptions (https://github.com/Azure/azure-sdk-for-go/pull/15123) + +### Breaking Changes +* Rename `AnonymousCredential` to `NewAnonymousCredential` (https://github.com/Azure/azure-sdk-for-go/pull/15104) +* rename `AuthenticationPolicyOptions` to `AuthenticationOptions` (https://github.com/Azure/azure-sdk-for-go/pull/15103) +* Make Header constants private (https://github.com/Azure/azure-sdk-for-go/pull/15038) + + +## 0.16.2 (2021-05-26) +### Features Added +* Improved support for byte arrays [#14715](https://github.com/Azure/azure-sdk-for-go/pull/14715) + + +## 0.16.1 (2021-05-19) +### Features Added +* Add license.txt to azcore module [#14682](https://github.com/Azure/azure-sdk-for-go/pull/14682) + + +## 0.16.0 (2021-05-07) +### Features Added +* Remove extra `*` in UnmarshalAsByteArray() [#14642](https://github.com/Azure/azure-sdk-for-go/pull/14642) + + +## 0.15.1 (2021-05-06) +### Features Added +* Cache the original request body on Request [#14634](https://github.com/Azure/azure-sdk-for-go/pull/14634) + + +## 0.15.0 (2021-05-05) +### Features Added +* Add support for null map and slice +* Export `Response.Payload` method + +### Breaking Changes +* remove `Response.UnmarshalError` as it's no longer required + + +## 0.14.5 (2021-04-23) +### Features Added +* Add `UnmarshalError()` on `azcore.Response` + + +## 0.14.4 (2021-04-22) +### Features Added +* Support for basic LRO polling +* Added type `LROPoller` and supporting types for basic polling on long running operations. +* rename poller param and added doc comment + +### Bugs Fixed +* Fixed content type detection bug in logging. 
+ + +## 0.14.3 (2021-03-29) +### Features Added +* Add support for multi-part form data +* Added method `WriteMultipartFormData()` to Request. + + +## 0.14.2 (2021-03-17) +### Features Added +* Add support for encoding JSON null values +* Adds `NullValue()` and `IsNullValue()` functions for setting and detecting sentinel values used for encoding a JSON null. +* Documentation fixes + +### Bugs Fixed +* Fixed improper error wrapping + + +## 0.14.1 (2021-02-08) +### Features Added +* Add `Pager` and `Poller` interfaces to azcore + + +## 0.14.0 (2021-01-12) +### Features Added +* Accept zero-value options for default values +* Specify zero-value options structs to accept default values. +* Remove `DefaultXxxOptions()` methods. +* Do not silently change TryTimeout on negative values +* make per-try timeout opt-in + + +## 0.13.4 (2020-11-20) +### Features Added +* Include telemetry string in User Agent + + +## 0.13.3 (2020-11-20) +### Features Added +* Updating response body handling on `azcore.Response` + + +## 0.13.2 (2020-11-13) +### Features Added +* Remove implementation of stateless policies as first-class functions. + + +## 0.13.1 (2020-11-05) +### Features Added +* Add `Telemetry()` method to `azcore.Request()` + + +## 0.13.0 (2020-10-14) +### Features Added +* Rename `log` to `logger` to avoid name collision with the log package. +* Documentation improvements +* Simplified `DefaultHTTPClientTransport()` implementation + + +## 0.12.1 (2020-10-13) +### Features Added +* Update `internal` module dependence to `v0.5.0` + + +## 0.12.0 (2020-10-08) +### Features Added +* Removed storage specific content +* Removed internal content to prevent API clutter +* Refactored various policy options to conform with our options pattern + + +## 0.11.0 (2020-09-22) +### Features Added + +* Removed `LogError` and `LogSlowResponse`. +* Renamed `options` in `RequestLogOptions`. +* Updated `NewRequestLogPolicy()` to follow standard pattern for options. +* Refactored `requestLogPolicy.Do()` per above changes. +* Cleaned up/added logging in retry policy. +* Export `NewResponseError()` +* Fix `RequestLogOptions` comment + + +## 0.10.1 (2020-09-17) +### Features Added +* Add default console logger +* Default console logger writes to stderr. To enable it, set env var `AZURE_SDK_GO_LOGGING` to the value 'all'. +* Added `Logger.Writef()` to reduce the need for `ShouldLog()` checks. +* Add `LogLongRunningOperation` + + +## 0.10.0 (2020-09-10) +### Features Added +* The `request` and `transport` interfaces have been refactored to align with the patterns in the standard library. +* `NewRequest()` now uses `http.NewRequestWithContext()` and performs additional validation, it also requires a context parameter. +* The `Policy` and `Transport` interfaces have had their context parameter removed as the context is associated with the underlying `http.Request`. +* `Pipeline.Do()` will validate the HTTP request before sending it through the pipeline, avoiding retries on a malformed request. +* The `Retrier` interface has been replaced with the `NonRetriableError` interface, and the retry policy updated to test for this. +* `Request.SetBody()` now requires a content type parameter for setting the request's MIME type. +* moved path concatenation into `JoinPaths()` func + + +## 0.9.6 (2020-08-18) +### Features Added +* Improvements to body download policy +* Always download the response body for error responses, i.e. HTTP status codes >= 400. 
+* Simplify variable declarations + + +## 0.9.5 (2020-08-11) +### Features Added +* Set the Content-Length header in `Request.SetBody` + + +## 0.9.4 (2020-08-03) +### Features Added +* Fix cancellation of per try timeout +* Per try timeout is used to ensure that an HTTP operation doesn't take too long, e.g. that a GET on some URL doesn't take an inordinant amount of time. +* Once the HTTP request returns, the per try timeout should be cancelled, not when the response has been read to completion. +* Do not drain response body if there are no more retries +* Do not retry non-idempotent operations when body download fails + + +## 0.9.3 (2020-07-28) +### Features Added +* Add support for custom HTTP request headers +* Inserts an internal policy into the pipeline that can extract HTTP header values from the caller's context, adding them to the request. +* Use `azcore.WithHTTPHeader` to add HTTP headers to a context. +* Remove method specific to Go 1.14 + + +## 0.9.2 (2020-07-28) +### Features Added +* Omit read-only content from request payloads +* If any field in a payload's object graph contains `azure:"ro"`, make a clone of the object graph, omitting all fields with this annotation. +* Verify no fields were dropped +* Handle embedded struct types +* Added test for cloning by value +* Add messages to failures + + +## 0.9.1 (2020-07-22) +### Features Added +* Updated dependency on internal module to fix race condition. + + +## 0.9.0 (2020-07-09) +### Features Added +* Add `HTTPResponse` interface to be used by callers to access the raw HTTP response from an error in the event of an API call failure. +* Updated `sdk/internal` dependency to latest version. +* Rename package alias + + +## 0.8.2 (2020-06-29) +### Features Added +* Added missing documentation comments + +### Bugs Fixed +* Fixed a bug in body download policy. + + +## 0.8.1 (2020-06-26) +### Features Added +* Miscellaneous clean-up reported by linters + + +## 0.8.0 (2020-06-01) +### Features Added +* Differentiate between standard and URL encoding. + + +## 0.7.1 (2020-05-27) +### Features Added +* Add support for for base64 encoding and decoding of payloads. + + +## 0.7.0 (2020-05-12) +### Features Added +* Change `RetryAfter()` to a function. + + +## 0.6.0 (2020-04-29) +### Features Added +* Updating `RetryAfter` to only return the detaion in the RetryAfter header + + +## 0.5.0 (2020-03-23) +### Features Added +* Export `TransportFunc` + +### Breaking Changes +* Removed `IterationDone` + + +## 0.4.1 (2020-02-25) +### Features Added +* Ensure per-try timeout is properly cancelled +* Explicitly call cancel the per-try timeout when the response body has been read/closed by the body download policy. +* When the response body is returned to the caller for reading/closing, wrap it in a `responseBodyReader` that will cancel the timeout when the body is closed. +* `Logger.Should()` will return false if no listener is set. + + +## 0.4.0 (2020-02-18) +### Features Added +* Enable custom `RetryOptions` to be specified per API call +* Added `WithRetryOptions()` that adds a custom `RetryOptions` to the provided context, allowing custom settings per API call. +* Remove 429 from the list of default HTTP status codes for retry. +* Change StatusCodesForRetry to a slice so consumers can append to it. +* Added support for retry-after in HTTP-date format. +* Cleaned up some comments specific to storage. 
+* Remove `Request.SetQueryParam()` +* Renamed `MaxTries` to `MaxRetries` + +## 0.3.0 (2020-01-16) +### Features Added +* Added `DefaultRetryOptions` to create initialized default options. + +### Breaking Changes +* Removed `Response.CheckStatusCode()` + + +## 0.2.0 (2020-01-15) +### Features Added +* Add support for marshalling and unmarshalling JSON +* Removed `Response.Payload` field +* Exit early when unmarsahlling if there is no payload + + +## 0.1.0 (2020-01-10) +### Features Added +* Initial release diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt new file mode 100644 index 000000000..48ea6616b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md new file mode 100644 index 000000000..35a74e18d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/README.md @@ -0,0 +1,39 @@ +# Azure Core Client Module for Go + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azcore)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore) +[![Build Status](https://dev.azure.com/azure-sdk/public/_apis/build/status/go/go%20-%20azcore%20-%20ci?branchName=main)](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=1843&branchName=main) +[![Code Coverage](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main)](https://img.shields.io/azure-devops/coverage/azure-sdk/public/1843/main) + +The `azcore` module provides a set of common interfaces and types for Go SDK client modules. +These modules follow the [Azure SDK Design Guidelines for Go](https://azure.github.io/azure-sdk/golang_introduction.html). + +## Getting started + +This project uses [Go modules](https://github.com/golang/go/wiki/Modules) for versioning and dependency management. + +Typically, you will not need to explicitly install `azcore` as it will be installed as a client module dependency. +To add the latest version to your `go.mod` file, execute the following command. + +```bash +go get github.com/Azure/azure-sdk-for-go/sdk/azcore +``` + +General documentation and examples can be found on [pkg.go.dev](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore). 
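+
+As a quick, illustrative sketch of the helpers this module provides, the snippet below uses
+`NullValue` and `IsNullValue` to mark a field for deletion in a JSON merge-patch payload.
+`Widget` here is a placeholder model, not a type defined by `azcore`.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+)
+
+// Widget stands in for a model type generated by an Azure SDK client module.
+type Widget struct {
+	Name  *string `json:"name,omitempty"`
+	Count *int    `json:"count,omitempty"`
+}
+
+func main() {
+	// Request deletion of Count while leaving Name untouched.
+	w := Widget{Count: azcore.NullValue[*int]()}
+
+	// IsNullValue reports whether a field holds the null sentinel;
+	// SDK marshallers use it to emit an explicit JSON null.
+	fmt.Println(azcore.IsNullValue(w.Count)) // true
+	fmt.Println(azcore.IsNullValue(w.Name))  // false
+}
+```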
+ +## Contributing +This project welcomes contributions and suggestions. Most contributions require +you to agree to a Contributor License Agreement (CLA) declaring that you have +the right to, and actually do, grant us the rights to use your contribution. +For details, visit [https://cla.microsoft.com](https://cla.microsoft.com). + +When you submit a pull request, a CLA-bot will automatically determine whether +you need to provide a CLA and decorate the PR appropriately (e.g., label, +comment). Simply follow the instructions provided by the bot. You will only +need to do this once across all repos using our CLA. + +This project has adopted the +[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information, see the +[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) +or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any +additional questions or comments. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml new file mode 100644 index 000000000..aab921853 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml @@ -0,0 +1,29 @@ +# NOTE: Please refer to https://aka.ms/azsdk/engsys/ci-yaml before editing this file. +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azcore/ + - eng/ + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/azcore/ + - eng/ + +stages: +- template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go new file mode 100644 index 000000000..9d077a3e1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/cloud.go @@ -0,0 +1,44 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package cloud + +var ( + // AzureChina contains configuration for Azure China. + AzureChina = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.chinacloudapi.cn/", Services: map[ServiceName]ServiceConfiguration{}, + } + // AzureGovernment contains configuration for Azure Government. + AzureGovernment = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.microsoftonline.us/", Services: map[ServiceName]ServiceConfiguration{}, + } + // AzurePublic contains configuration for Azure Public Cloud. + AzurePublic = Configuration{ + ActiveDirectoryAuthorityHost: "https://login.microsoftonline.com/", Services: map[ServiceName]ServiceConfiguration{}, + } +) + +// ServiceName identifies a cloud service. +type ServiceName string + +// ResourceManager is a global constant identifying Azure Resource Manager. +const ResourceManager ServiceName = "resourceManager" + +// ServiceConfiguration configures a specific cloud service such as Azure Resource Manager. +type ServiceConfiguration struct { + // Audience is the audience the client will request for its access tokens. + Audience string + // Endpoint is the service's base URL. + Endpoint string +} + +// Configuration configures a cloud. +type Configuration struct { + // ActiveDirectoryAuthorityHost is the base URL of the cloud's Azure Active Directory. 
+ ActiveDirectoryAuthorityHost string + // Services contains configuration for the cloud's services. + Services map[ServiceName]ServiceConfiguration +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go new file mode 100644 index 000000000..985b1bde2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud/doc.go @@ -0,0 +1,53 @@ +//go:build go1.16 +// +build go1.16 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +/* +Package cloud implements a configuration API for applications deployed to sovereign or private Azure clouds. + +Azure SDK client configuration defaults are appropriate for Azure Public Cloud (sometimes referred to as +"Azure Commercial" or simply "Microsoft Azure"). This package enables applications deployed to other +Azure Clouds to configure clients appropriately. + +This package contains predefined configuration for well-known sovereign clouds such as Azure Government and +Azure China. Azure SDK clients accept this configuration via the Cloud field of azcore.ClientOptions. For +example, configuring a credential and ARM client for Azure Government: + + opts := azcore.ClientOptions{Cloud: cloud.AzureGovernment} + cred, err := azidentity.NewDefaultAzureCredential( + &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts}, + ) + handle(err) + + client, err := armsubscription.NewClient( + cred, &arm.ClientOptions{ClientOptions: opts}, + ) + handle(err) + +Applications deployed to a private cloud such as Azure Stack create a Configuration object with +appropriate values: + + c := cloud.Configuration{ + ActiveDirectoryAuthorityHost: "https://...", + Services: map[cloud.ServiceName]cloud.ServiceConfiguration{ + cloud.ResourceManager: { + Audience: "...", + Endpoint: "https://...", + }, + }, + } + opts := azcore.ClientOptions{Cloud: c} + + cred, err := azidentity.NewDefaultAzureCredential( + &azidentity.DefaultAzureCredentialOptions{ClientOptions: opts}, + ) + handle(err) + + client, err := armsubscription.NewClient( + cred, &arm.ClientOptions{ClientOptions: opts}, + ) + handle(err) +*/ +package cloud diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go new file mode 100644 index 000000000..f9fb23422 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go @@ -0,0 +1,75 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azcore + +import ( + "context" + "reflect" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// AccessToken represents an Azure service bearer access token with expiry information. +type AccessToken struct { + Token string + ExpiresOn time.Time +} + +// TokenCredential represents a credential capable of providing an OAuth token. +type TokenCredential interface { + // GetToken requests an access token for the specified set of scopes. + GetToken(ctx context.Context, options policy.TokenRequestOptions) (AccessToken, error) +} + +// holds sentinel values used to send nulls +var nullables map[reflect.Type]interface{} = map[reflect.Type]interface{}{} + +// NullValue is used to send an explicit 'null' within a request. +// This is typically used in JSON-MERGE-PATCH operations to delete a value. 
+func NullValue[T any]() T { + t := shared.TypeOfT[T]() + v, found := nullables[t] + if !found { + var o reflect.Value + if k := t.Kind(); k == reflect.Map { + o = reflect.MakeMap(t) + } else if k == reflect.Slice { + // empty slices appear to all point to the same data block + // which causes comparisons to become ambiguous. so we create + // a slice with len/cap of one which ensures a unique address. + o = reflect.MakeSlice(t, 1, 1) + } else { + o = reflect.New(t.Elem()) + } + v = o.Interface() + nullables[t] = v + } + // return the sentinel object + return v.(T) +} + +// IsNullValue returns true if the field contains a null sentinel value. +// This is used by custom marshallers to properly encode a null value. +func IsNullValue[T any](v T) bool { + // see if our map has a sentinel object for this *T + t := reflect.TypeOf(v) + if o, found := nullables[t]; found { + o1 := reflect.ValueOf(o) + v1 := reflect.ValueOf(v) + // we found it; return true if v points to the sentinel object. + // NOTE: maps and slices can only be compared to nil, else you get + // a runtime panic. so we compare addresses instead. + return o1.Pointer() == v1.Pointer() + } + // no sentinel object for this *t + return false +} + +// ClientOptions contains configuration settings for a client's pipeline. +type ClientOptions = policy.ClientOptions diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go new file mode 100644 index 000000000..7119699f9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/doc.go @@ -0,0 +1,257 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +/* +Package azcore implements an HTTP request/response middleware pipeline used by Azure SDK clients. + +The middleware consists of three components. + + - One or more Policy instances. + - A Transporter instance. + - A Pipeline instance that combines the Policy and Transporter instances. + +Implementing the Policy Interface + +A Policy can be implemented in two ways; as a first-class function for a stateless Policy, or as +a method on a type for a stateful Policy. Note that HTTP requests made via the same pipeline share +the same Policy instances, so if a Policy mutates its state it MUST be properly synchronized to +avoid race conditions. + +A Policy's Do method is called when an HTTP request wants to be sent over the network. The Do method can +perform any operation(s) it desires. For example, it can log the outgoing request, mutate the URL, headers, +and/or query parameters, inject a failure, etc. Once the Policy has successfully completed its request +work, it must call the Next() method on the *policy.Request instance in order to pass the request to the +next Policy in the chain. + +When an HTTP response comes back, the Policy then gets a chance to process the response/error. The Policy instance +can log the response, retry the operation if it failed due to a transient error or timeout, unmarshal the response +body, etc. Once the Policy has successfully completed its response work, it must return the *http.Response +and error instances to its caller. + +Template for implementing a stateless Policy: + + type policyFunc func(*policy.Request) (*http.Response, error) + // Do implements the Policy interface on policyFunc. 
+ + func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) { + return pf(req) + } + + func NewMyStatelessPolicy() policy.Policy { + return policyFunc(func(req *policy.Request) (*http.Response, error) { + // TODO: mutate/process Request here + + // forward Request to next Policy & get Response/error + resp, err := req.Next() + + // TODO: mutate/process Response/error here + + // return Response/error to previous Policy + return resp, err + }) + } + +Template for implementing a stateful Policy: + + type MyStatefulPolicy struct { + // TODO: add configuration/setting fields here + } + + // TODO: add initialization args to NewMyStatefulPolicy() + func NewMyStatefulPolicy() policy.Policy { + return &MyStatefulPolicy{ + // TODO: initialize configuration/setting fields here + } + } + + func (p *MyStatefulPolicy) Do(req *policy.Request) (resp *http.Response, err error) { + // TODO: mutate/process Request here + + // forward Request to next Policy & get Response/error + resp, err := req.Next() + + // TODO: mutate/process Response/error here + + // return Response/error to previous Policy + return resp, err + } + +Implementing the Transporter Interface + +The Transporter interface is responsible for sending the HTTP request and returning the corresponding +HTTP response or error. The Transporter is invoked by the last Policy in the chain. The default Transporter +implementation uses a shared http.Client from the standard library. + +The same stateful/stateless rules for Policy implementations apply to Transporter implementations. + +Using Policy and Transporter Instances Via a Pipeline + +To use the Policy and Transporter instances, an application passes them to the runtime.NewPipeline function. + + func NewPipeline(transport Transporter, policies ...Policy) Pipeline + +The specified Policy instances form a chain and are invoked in the order provided to NewPipeline +followed by the Transporter. + +Once the Pipeline has been created, create a runtime.Request instance and pass it to Pipeline's Do method. + + func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) + + func (p Pipeline) Do(req *Request) (*http.Request, error) + +The Pipeline.Do method sends the specified Request through the chain of Policy and Transporter +instances. The response/error is then sent through the same chain of Policy instances in reverse +order. For example, assuming there are Policy types PolicyA, PolicyB, and PolicyC along with +TransportA. + + pipeline := NewPipeline(TransportA, PolicyA, PolicyB, PolicyC) + +The flow of Request and Response looks like the following: + + policy.Request -> PolicyA -> PolicyB -> PolicyC -> TransportA -----+ + | + HTTP(S) endpoint + | + caller <--------- PolicyA <- PolicyB <- PolicyC <- http.Response-+ + +Creating a Request Instance + +The Request instance passed to Pipeline's Do method is a wrapper around an *http.Request. It also +contains some internal state and provides various convenience methods. You create a Request instance +by calling the runtime.NewRequest function: + + func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) + +If the Request should contain a body, call the SetBody method. + + func (req *Request) SetBody(body ReadSeekCloser, contentType string) error + +A seekable stream is required so that upon retry, the retry Policy instance can seek the stream +back to the beginning before retrying the network request and re-uploading the body. 
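+
+For example, a PUT request carrying a small JSON payload might be constructed like this (the
+endpoint and payload are placeholders, and error handling is elided):
+
+	req, err := runtime.NewRequest(context.TODO(), http.MethodPut, "https://contoso.example/widgets/w1")
+	// handle err
+	body := streaming.NopCloser(strings.NewReader(`{"name":"w1"}`))
+	err = req.SetBody(body, "application/json")
+	// handle err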
+ +Sending an Explicit Null + +Operations like JSON-MERGE-PATCH send a JSON null to indicate a value should be deleted. + + { + "delete-me": null + } + +This requirement conflicts with the SDK's default marshalling that specifies "omitempty" as +a means to resolve the ambiguity between a field to be excluded and its zero-value. + + type Widget struct { + Name *string `json:",omitempty"` + Count *int `json:",omitempty"` + } + +In the above example, Name and Count are defined as pointer-to-type to disambiguate between +a missing value (nil) and a zero-value (0) which might have semantic differences. + +In a PATCH operation, any fields left as nil are to have their values preserved. When updating +a Widget's count, one simply specifies the new value for Count, leaving Name nil. + +To fulfill the requirement for sending a JSON null, the NullValue() function can be used. + + w := Widget{ + Count: azcore.NullValue[*int](), + } + +This sends an explict "null" for Count, indicating that any current value for Count should be deleted. + +Processing the Response + +When the HTTP response is received, the *http.Response is returned directly. Each Policy instance +can inspect/mutate the *http.Response. + +Built-in Logging + +To enable logging, set environment variable AZURE_SDK_GO_LOGGING to "all" before executing your program. + +By default the logger writes to stderr. This can be customized by calling log.SetListener, providing +a callback that writes to the desired location. Any custom logging implementation MUST provide its +own synchronization to handle concurrent invocations. + +See the docs for the log package for further details. + +Pageable Operations + +Pageable operations return potentially large data sets spread over multiple GET requests. The result of +each GET is a "page" of data consisting of a slice of items. + +Pageable operations can be identified by their New*Pager naming convention and return type of *runtime.Pager[T]. + + func (c *WidgetClient) NewListWidgetsPager(o *Options) *runtime.Pager[PageResponse] + +The call to WidgetClient.NewListWidgetsPager() returns an instance of *runtime.Pager[T] for fetching pages +and determining if there are more pages to fetch. No IO calls are made until the NextPage() method is invoked. + + pager := widgetClient.NewListWidgetsPager(nil) + for pager.More() { + page, err := pager.NextPage(context.TODO()) + // handle err + for _, widget := range page.Values { + // process widget + } + } + +Long-Running Operations + +Long-running operations (LROs) are operations consisting of an initial request to start the operation followed +by polling to determine when the operation has reached a terminal state. An LRO's terminal state is one +of the following values. + + * Succeeded - the LRO completed successfully + * Failed - the LRO failed to complete + * Canceled - the LRO was canceled + +LROs can be identified by their Begin* prefix and their return type of *runtime.Poller[T]. + + func (c *WidgetClient) BeginCreateOrUpdate(ctx context.Context, w Widget, o *Options) (*runtime.Poller[Response], error) + +When a call to WidgetClient.BeginCreateOrUpdate() returns a nil error, it means that the LRO has started. +It does _not_ mean that the widget has been created or updated (or failed to be created/updated). + +The *runtime.Poller[T] provides APIs for determining the state of the LRO. To wait for the LRO to complete, +call the PollUntilDone() method. 
+ + poller, err := widgetClient.BeginCreateOrUpdate(context.TODO(), Widget{}, nil) + // handle err + result, err := poller.PollUntilDone(context.TODO(), nil) + // handle err + // use result + +The call to PollUntilDone() will block the current goroutine until the LRO has reached a terminal state or the +context is canceled/timed out. + +Note that LROs can take anywhere from several seconds to several minutes. The duration is operation-dependent. Due to +this variant behavior, pollers do _not_ have a preconfigured time-out. Use a context with the appropriate cancellation +mechanism as required. + +Resume Tokens + +Pollers provide the ability to serialize their state into a "resume token" which can be used by another process to +recreate the poller. This is achieved via the runtime.Poller[T].ResumeToken() method. + + token, err := poller.ResumeToken() + // handle error + +Note that a token can only be obtained for a poller that's in a non-terminal state. Also note that any subsequent calls +to poller.Poll() might change the poller's state. In this case, a new token should be created. + +After the token has been obtained, it can be used to recreate an instance of the originating poller. + + poller, err := widgetClient.BeginCreateOrUpdate(nil, Widget{}, &Options{ + ResumeToken: token, + }) + +When resuming a poller, no IO is performed, and zero-value arguments can be used for everything but the Options.ResumeToken. + +Resume tokens are unique per service client and operation. Attempting to resume a poller for LRO BeginB() with a token from LRO +BeginA() will result in an error. +*/ +package azcore diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go new file mode 100644 index 000000000..17bd50c67 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go @@ -0,0 +1,14 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azcore + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + +// ResponseError is returned when a request is made to a service and +// the service returns a non-success HTTP status code. +// Use errors.As() to access this type in the error chain. +type ResponseError = exported.ResponseError diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go new file mode 100644 index 000000000..23ea7e7c8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/etag.go @@ -0,0 +1,48 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azcore + +import ( + "strings" +) + +// ETag is a property used for optimistic concurrency during updates +// ETag is a validator based on https://tools.ietf.org/html/rfc7232#section-2.3.2 +// An ETag can be empty (""). +type ETag string + +// ETagAny is an ETag that represents everything, the value is "*" +const ETagAny ETag = "*" + +// Equals does a strong comparison of two ETags. Equals returns true when both +// ETags are not weak and the values of the underlying strings are equal. +func (e ETag) Equals(other ETag) bool { + return !e.IsWeak() && !other.IsWeak() && e == other +} + +// WeakEquals does a weak comparison of two ETags. 
Two ETags are equivalent if their opaque-tags match +// character-by-character, regardless of either or both being tagged as "weak". +func (e ETag) WeakEquals(other ETag) bool { + getStart := func(e1 ETag) int { + if e1.IsWeak() { + return 2 + } + return 0 + } + aStart := getStart(e) + bStart := getStart(other) + + aVal := e[aStart:] + bVal := other[bStart:] + + return aVal == bVal +} + +// IsWeak specifies whether the ETag is strong or weak. +func (e ETag) IsWeak() bool { + return len(e) >= 4 && strings.HasPrefix(string(e), "W/\"") && strings.HasSuffix(string(e), "\"") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go new file mode 100644 index 000000000..8fca32a7d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go @@ -0,0 +1,61 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "io" + "io/ioutil" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +type nopCloser struct { + io.ReadSeeker +} + +func (n nopCloser) Close() error { + return nil +} + +// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. +// Exported as streaming.NopCloser(). +func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { + return nopCloser{rs} +} + +// HasStatusCode returns true if the Response's status code is one of the specified values. +// Exported as runtime.HasStatusCode(). +func HasStatusCode(resp *http.Response, statusCodes ...int) bool { + if resp == nil { + return false + } + for _, sc := range statusCodes { + if resp.StatusCode == sc { + return true + } + } + return false +} + +// Payload reads and returns the response body or an error. +// On a successful read, the response body is cached. +// Subsequent reads will access the cached value. +// Exported as runtime.Payload(). +func Payload(resp *http.Response) ([]byte, error) { + // r.Body won't be a nopClosingBytesReader if downloading was skipped + if buf, ok := resp.Body.(*shared.NopClosingBytesReader); ok { + return buf.Bytes(), nil + } + bytesBody, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, err + } + resp.Body = shared.NewNopClosingBytesReader(bytesBody) + return bytesBody, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go new file mode 100644 index 000000000..c44efd6ef --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/pipeline.go @@ -0,0 +1,97 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "errors" + "fmt" + "net/http" + + "golang.org/x/net/http/httpguts" +) + +// Policy represents an extensibility point for the Pipeline that can mutate the specified +// Request and react to the received Response. +// Exported as policy.Policy. +type Policy interface { + // Do applies the policy to the specified Request. When implementing a Policy, mutate the + // request before calling req.Next() to move on to the next policy, and respond to the result + // before returning to the caller. 
+ Do(req *Request) (*http.Response, error) +} + +// Pipeline represents a primitive for sending HTTP requests and receiving responses. +// Its behavior can be extended by specifying policies during construction. +// Exported as runtime.Pipeline. +type Pipeline struct { + policies []Policy +} + +// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses. +// Exported as policy.Transporter. +type Transporter interface { + // Do sends the HTTP request and returns the HTTP response or error. + Do(req *http.Request) (*http.Response, error) +} + +// used to adapt a TransportPolicy to a Policy +type transportPolicy struct { + trans Transporter +} + +func (tp transportPolicy) Do(req *Request) (*http.Response, error) { + if tp.trans == nil { + return nil, errors.New("missing transporter") + } + resp, err := tp.trans.Do(req.Raw()) + if err != nil { + return nil, err + } else if resp == nil { + // there was no response and no error (rare but can happen) + // this ensures the retry policy will retry the request + return nil, errors.New("received nil response") + } + return resp, nil +} + +// NewPipeline creates a new Pipeline object from the specified Policies. +// Not directly exported, but used as part of runtime.NewPipeline(). +func NewPipeline(transport Transporter, policies ...Policy) Pipeline { + // transport policy must always be the last in the slice + policies = append(policies, transportPolicy{trans: transport}) + return Pipeline{ + policies: policies, + } +} + +// Do is called for each and every HTTP request. It passes the request through all +// the Policy objects (which can transform the Request's URL/query parameters/headers) +// and ultimately sends the transformed HTTP request over the network. +func (p Pipeline) Do(req *Request) (*http.Response, error) { + if req == nil { + return nil, errors.New("request cannot be nil") + } + // check copied from Transport.roundTrip() + for k, vv := range req.Raw().Header { + if !httpguts.ValidHeaderFieldName(k) { + if req.Raw().Body != nil { + req.Raw().Body.Close() + } + return nil, fmt.Errorf("invalid header field name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + if req.Raw().Body != nil { + req.Raw().Body.Close() + } + return nil, fmt.Errorf("invalid header field value %q for key %v", v, k) + } + } + } + req.policies = p.policies + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go new file mode 100644 index 000000000..4aeec1589 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go @@ -0,0 +1,156 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "reflect" + "strconv" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline. +// Don't use this type directly, use NewRequest() instead. +// Exported as policy.Request. 
+type Request struct { + req *http.Request + body io.ReadSeekCloser + policies []Policy + values opValues +} + +type opValues map[reflect.Type]interface{} + +// Set adds/changes a value +func (ov opValues) set(value interface{}) { + ov[reflect.TypeOf(value)] = value +} + +// Get looks for a value set by SetValue first +func (ov opValues) get(value interface{}) bool { + v, ok := ov[reflect.ValueOf(value).Elem().Type()] + if ok { + reflect.ValueOf(value).Elem().Set(reflect.ValueOf(v)) + } + return ok +} + +// NewRequest creates a new Request with the specified input. +// Exported as runtime.NewRequest(). +func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Request, error) { + req, err := http.NewRequestWithContext(ctx, httpMethod, endpoint, nil) + if err != nil { + return nil, err + } + if req.URL.Host == "" { + return nil, errors.New("no Host in request URL") + } + if !(req.URL.Scheme == "http" || req.URL.Scheme == "https") { + return nil, fmt.Errorf("unsupported protocol scheme %s", req.URL.Scheme) + } + return &Request{req: req}, nil +} + +// Body returns the original body specified when the Request was created. +func (req *Request) Body() io.ReadSeekCloser { + return req.body +} + +// Raw returns the underlying HTTP request. +func (req *Request) Raw() *http.Request { + return req.req +} + +// Next calls the next policy in the pipeline. +// If there are no more policies, nil and an error are returned. +// This method is intended to be called from pipeline policies. +// To send a request through a pipeline call Pipeline.Do(). +func (req *Request) Next() (*http.Response, error) { + if len(req.policies) == 0 { + return nil, errors.New("no more policies") + } + nextPolicy := req.policies[0] + nextReq := *req + nextReq.policies = nextReq.policies[1:] + return nextPolicy.Do(&nextReq) +} + +// SetOperationValue adds/changes a mutable key/value associated with a single operation. +func (req *Request) SetOperationValue(value interface{}) { + if req.values == nil { + req.values = opValues{} + } + req.values.set(value) +} + +// OperationValue looks for a value set by SetOperationValue(). +func (req *Request) OperationValue(value interface{}) bool { + if req.values == nil { + return false + } + return req.values.get(value) +} + +// SetBody sets the specified ReadSeekCloser as the HTTP request body. +func (req *Request) SetBody(body io.ReadSeekCloser, contentType string) error { + // Set the body and content length. + size, err := body.Seek(0, io.SeekEnd) // Seek to the end to get the stream's size + if err != nil { + return err + } + if size == 0 { + body.Close() + return nil + } + _, err = body.Seek(0, io.SeekStart) + if err != nil { + return err + } + req.Raw().GetBody = func() (io.ReadCloser, error) { + _, err := body.Seek(0, io.SeekStart) // Seek back to the beginning of the stream + return body, err + } + // keep a copy of the original body. this is to handle cases + // where req.Body is replaced, e.g. httputil.DumpRequest and friends. + req.body = body + req.req.Body = body + req.req.ContentLength = size + req.req.Header.Set(shared.HeaderContentType, contentType) + req.req.Header.Set(shared.HeaderContentLength, strconv.FormatInt(size, 10)) + return nil +} + +// RewindBody seeks the request's Body stream back to the beginning so it can be resent when retrying an operation. 
+func (req *Request) RewindBody() error { + if req.body != nil { + // Reset the stream back to the beginning and restore the body + _, err := req.body.Seek(0, io.SeekStart) + req.req.Body = req.body + return err + } + return nil +} + +// Close closes the request body. +func (req *Request) Close() error { + if req.body == nil { + return nil + } + return req.body.Close() +} + +// Clone returns a deep copy of the request with its context changed to ctx. +func (req *Request) Clone(ctx context.Context) *Request { + r2 := *req + r2.req = req.req.Clone(ctx) + return &r2 +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go new file mode 100644 index 000000000..3db6acc83 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go @@ -0,0 +1,142 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package exported + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "regexp" +) + +// NewResponseError creates a new *ResponseError from the provided HTTP response. +// Exported as runtime.NewResponseError(). +func NewResponseError(resp *http.Response) error { + respErr := &ResponseError{ + StatusCode: resp.StatusCode, + RawResponse: resp, + } + + // prefer the error code in the response header + if ec := resp.Header.Get("x-ms-error-code"); ec != "" { + respErr.ErrorCode = ec + return respErr + } + + // if we didn't get x-ms-error-code, check in the response body + body, err := Payload(resp) + if err != nil { + return err + } + + if len(body) > 0 { + if code := extractErrorCodeJSON(body); code != "" { + respErr.ErrorCode = code + } else if code := extractErrorCodeXML(body); code != "" { + respErr.ErrorCode = code + } + } + + return respErr +} + +func extractErrorCodeJSON(body []byte) string { + var rawObj map[string]interface{} + if err := json.Unmarshal(body, &rawObj); err != nil { + // not a JSON object + return "" + } + + // check if this is a wrapped error, i.e. { "error": { ... } } + // if so then unwrap it + if wrapped, ok := rawObj["error"]; ok { + unwrapped, ok := wrapped.(map[string]interface{}) + if !ok { + return "" + } + rawObj = unwrapped + } else if wrapped, ok := rawObj["odata.error"]; ok { + // check if this a wrapped odata error, i.e. { "odata.error": { ... } } + unwrapped, ok := wrapped.(map[string]any) + if !ok { + return "" + } + rawObj = unwrapped + } + + // now check for the error code + code, ok := rawObj["code"] + if !ok { + return "" + } + codeStr, ok := code.(string) + if !ok { + return "" + } + return codeStr +} + +func extractErrorCodeXML(body []byte) string { + // regular expression is much easier than dealing with the XML parser + rx := regexp.MustCompile(`<(?:\w+:)?[c|C]ode>\s*(\w+)\s*<\/(?:\w+:)?[c|C]ode>`) + res := rx.FindStringSubmatch(string(body)) + if len(res) != 2 { + return "" + } + // first submatch is the entire thing, second one is the captured error code + return res[1] +} + +// ResponseError is returned when a request is made to a service and +// the service returns a non-success HTTP status code. +// Use errors.As() to access this type in the error chain. +// Exported as azcore.ResponseError. +type ResponseError struct { + // ErrorCode is the error code returned by the resource provider if available. 
+ ErrorCode string + + // StatusCode is the HTTP status code as defined in https://pkg.go.dev/net/http#pkg-constants. + StatusCode int + + // RawResponse is the underlying HTTP response. + RawResponse *http.Response +} + +// Error implements the error interface for type ResponseError. +// Note that the message contents are not contractual and can change over time. +func (e *ResponseError) Error() string { + // write the request method and URL with response status code + msg := &bytes.Buffer{} + fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status) + if e.ErrorCode != "" { + fmt.Fprintf(msg, "ERROR CODE: %s\n", e.ErrorCode) + } else { + fmt.Fprintln(msg, "ERROR CODE UNAVAILABLE") + } + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + body, err := Payload(e.RawResponse) + if err != nil { + // this really shouldn't fail at this point as the response + // body is already cached (it was read in NewResponseError) + fmt.Fprintf(msg, "Error reading response body: %v", err) + } else if len(body) > 0 { + if err := json.Indent(msg, body, "", " "); err != nil { + // failed to pretty-print so just dump it verbatim + fmt.Fprint(msg, string(body)) + } + // the standard library doesn't have a pretty-printer for XML + fmt.Fprintln(msg) + } else { + fmt.Fprintln(msg, "Response contained no body") + } + fmt.Fprintln(msg, "--------------------------------------------------------------------------------") + + return msg.String() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go new file mode 100644 index 000000000..0684cb317 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log/log.go @@ -0,0 +1,38 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// This is an internal helper package to combine the complete logging APIs. +package log + +import ( + azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +type Event = log.Event + +const ( + EventRequest = azlog.EventRequest + EventResponse = azlog.EventResponse + EventRetryPolicy = azlog.EventRetryPolicy + EventLRO = azlog.EventLRO +) + +func Write(cls log.Event, msg string) { + log.Write(cls, msg) +} + +func Writef(cls log.Event, format string, a ...interface{}) { + log.Writef(cls, format, a...) +} + +func SetListener(lst func(Event, string)) { + log.SetListener(lst) +} + +func Should(cls log.Event) bool { + return log.Should(cls) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go new file mode 100644 index 000000000..0194b8b01 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async/async.go @@ -0,0 +1,147 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package async + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/async-api-reference.md + +// Applicable returns true if the LRO is using Azure-AsyncOperation. +func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderAzureAsync) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + _, ok := token["asyncURL"] + return ok +} + +// Poller is an LRO poller that uses the Azure-AsyncOperation pattern. +type Poller[T any] struct { + pl exported.Pipeline + + resp *http.Response + + // The URL from Azure-AsyncOperation header. + AsyncURL string `json:"asyncURL"` + + // The URL from Location header. + LocURL string `json:"locURL"` + + // The URL from the initial LRO request. + OrigURL string `json:"origURL"` + + // The HTTP method from the initial LRO request. + Method string `json:"method"` + + // The value of final-state-via from swagger, can be the empty string. + FinalState pollers.FinalStateVia `json:"finalState"` + + // The LRO's current state. + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response and final-state type. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Azure-AsyncOperation poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Azure-AsyncOperation poller.") + asyncURL := resp.Header.Get(shared.HeaderAzureAsync) + if asyncURL == "" { + return nil, errors.New("response is missing Azure-AsyncOperation header") + } + if !pollers.IsValidURL(asyncURL) { + return nil, fmt.Errorf("invalid polling URL %s", asyncURL) + } + p := &Poller[T]{ + pl: pl, + resp: resp, + AsyncURL: asyncURL, + LocURL: resp.Header.Get(shared.HeaderLocation), + OrigURL: resp.Request.URL.String(), + Method: resp.Request.Method, + FinalState: finalState, + CurState: pollers.StatusInProgress, + } + return p, nil +} + +// Done returns true if the LRO is in a terminal state. +func (p *Poller[T]) Done() bool { + return pollers.IsTerminalState(p.CurState) +} + +// Poll retrieves the current state of the LRO. 
+func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.AsyncURL, p.pl, func(resp *http.Response) (string, error) { + state, err := pollers.GetStatus(resp) + if err != nil { + return "", err + } else if state == "" { + return "", errors.New("the response did not contain a status") + } + p.resp = resp + p.CurState = state + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + if p.resp.StatusCode == http.StatusNoContent { + return nil + } else if pollers.Failed(p.CurState) { + return exported.NewResponseError(p.resp) + } + var req *exported.Request + var err error + if p.Method == http.MethodPatch || p.Method == http.MethodPut { + // for PATCH and PUT, the final GET is on the original resource URL + req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) + } else if p.Method == http.MethodPost { + if p.FinalState == pollers.FinalStateViaAzureAsyncOp { + // no final GET required + } else if p.FinalState == pollers.FinalStateViaOriginalURI { + req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) + } else if p.LocURL != "" { + // ideally FinalState would be set to "location" but it isn't always. + // must check last due to more permissive condition. + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } + } + if err != nil { + return err + } + + // if a final GET request has been created, execute it + if req != nil { + resp, err := p.pl.Do(req) + if err != nil { + return err + } + p.resp = resp + } + + return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go new file mode 100644 index 000000000..99e9f2f8d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body/body.go @@ -0,0 +1,130 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package body + +import ( + "context" + "errors" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" +) + +// Kind is the identifier of this type in a resume token. +const kind = "body" + +// Applicable returns true if the LRO is using no headers, just provisioning state. +// This is only applicable to PATCH and PUT methods and assumes no polling headers. +func Applicable(resp *http.Response) bool { + // we can't check for absense of headers due to some misbehaving services + // like redis that return a Location header but don't actually use that protocol + return resp.Request.Method == http.MethodPatch || resp.Request.Method == http.MethodPut +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + t, ok := token["type"] + if !ok { + return false + } + tt, ok := t.(string) + if !ok { + return false + } + return tt == kind +} + +// Poller is an LRO poller that uses the Body pattern. +type Poller[T any] struct { + pl exported.Pipeline + + resp *http.Response + + // The poller's type, used for resume token processing. + Type string `json:"type"` + + // The URL for polling. 
+ PollURL string `json:"pollURL"` + + // The LRO's current state. + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Body poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Body poller.") + p := &Poller[T]{ + pl: pl, + resp: resp, + Type: kind, + PollURL: resp.Request.URL.String(), + } + // default initial state to InProgress. depending on the HTTP + // status code and provisioning state, we might change the value. + curState := pollers.StatusInProgress + provState, err := pollers.GetProvisioningState(resp) + if err != nil && !errors.Is(err, pollers.ErrNoBody) { + return nil, err + } + if resp.StatusCode == http.StatusCreated && provState != "" { + // absense of provisioning state is ok for a 201, means the operation is in progress + curState = provState + } else if resp.StatusCode == http.StatusOK { + if provState != "" { + curState = provState + } else if provState == "" { + // for a 200, absense of provisioning state indicates success + curState = pollers.StatusSucceeded + } + } else if resp.StatusCode == http.StatusNoContent { + curState = pollers.StatusSucceeded + } + p.CurState = curState + return p, nil +} + +func (p *Poller[T]) Done() bool { + return pollers.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) { + if resp.StatusCode == http.StatusNoContent { + p.resp = resp + p.CurState = pollers.StatusSucceeded + return p.CurState, nil + } + state, err := pollers.GetProvisioningState(resp) + if errors.Is(err, pollers.ErrNoBody) { + // a missing response body in non-204 case is an error + return "", err + } else if state == "" { + // a response body without provisioning state is considered terminal success + state = pollers.StatusSucceeded + } else if err != nil { + return "", err + } + p.resp = resp + p.CurState = state + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go new file mode 100644 index 000000000..56c2b9029 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc/loc.go @@ -0,0 +1,111 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package loc + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// Kind is the identifier of this type in a resume token. +const kind = "loc" + +// Applicable returns true if the LRO is using Location. 
+func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderLocation) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + t, ok := token["type"] + if !ok { + return false + } + tt, ok := t.(string) + if !ok { + return false + } + return tt == kind +} + +// Poller is an LRO poller that uses the Location pattern. +type Poller[T any] struct { + pl exported.Pipeline + resp *http.Response + + Type string `json:"type"` + PollURL string `json:"pollURL"` + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Location poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Location poller.") + locURL := resp.Header.Get(shared.HeaderLocation) + if locURL == "" { + return nil, errors.New("response is missing Location header") + } + if !pollers.IsValidURL(locURL) { + return nil, fmt.Errorf("invalid polling URL %s", locURL) + } + return &Poller[T]{ + pl: pl, + resp: resp, + Type: kind, + PollURL: locURL, + CurState: pollers.StatusInProgress, + }, nil +} + +func (p *Poller[T]) Done() bool { + return pollers.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.PollURL, p.pl, func(resp *http.Response) (string, error) { + // location polling can return an updated polling URL + if h := resp.Header.Get(shared.HeaderLocation); h != "" { + p.PollURL = h + } + // if provisioning state is available, use that. this is only + // for some ARM LRO scenarios (e.g. DELETE with a Location header) + // so if it's missing then use HTTP status code. + provState, _ := pollers.GetProvisioningState(resp) + p.resp = resp + if provState != "" { + p.CurState = provState + } else if resp.StatusCode == http.StatusAccepted { + p.CurState = pollers.StatusInProgress + } else if resp.StatusCode > 199 && resp.StatusCode < 300 { + // any 2xx other than a 202 indicates success + p.CurState = pollers.StatusSucceeded + } else { + p.CurState = pollers.StatusFailed + } + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go new file mode 100644 index 000000000..dd714e768 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go @@ -0,0 +1,140 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package op + +import ( + "context" + "errors" + "fmt" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// Applicable returns true if the LRO is using Operation-Location. 
+func Applicable(resp *http.Response) bool { + return resp.Header.Get(shared.HeaderOperationLocation) != "" +} + +// CanResume returns true if the token can rehydrate this poller type. +func CanResume(token map[string]interface{}) bool { + _, ok := token["oplocURL"] + return ok +} + +// Poller is an LRO poller that uses the Operation-Location pattern. +type Poller[T any] struct { + pl exported.Pipeline + resp *http.Response + + OpLocURL string `json:"oplocURL"` + LocURL string `json:"locURL"` + OrigURL string `json:"origURL"` + Method string `json:"method"` + FinalState pollers.FinalStateVia `json:"finalState"` + CurState string `json:"state"` +} + +// New creates a new Poller from the provided initial response. +// Pass nil for response to create an empty Poller for rehydration. +func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) { + if resp == nil { + log.Write(log.EventLRO, "Resuming Operation-Location poller.") + return &Poller[T]{pl: pl}, nil + } + log.Write(log.EventLRO, "Using Operation-Location poller.") + opURL := resp.Header.Get(shared.HeaderOperationLocation) + if opURL == "" { + return nil, errors.New("response is missing Operation-Location header") + } + if !pollers.IsValidURL(opURL) { + return nil, fmt.Errorf("invalid Operation-Location URL %s", opURL) + } + locURL := resp.Header.Get(shared.HeaderLocation) + // Location header is optional + if locURL != "" && !pollers.IsValidURL(locURL) { + return nil, fmt.Errorf("invalid Location URL %s", locURL) + } + // default initial state to InProgress. if the + // service sent us a status then use that instead. + curState := pollers.StatusInProgress + status, err := pollers.GetStatus(resp) + if err != nil && !errors.Is(err, pollers.ErrNoBody) { + return nil, err + } + if status != "" { + curState = status + } + + return &Poller[T]{ + pl: pl, + resp: resp, + OpLocURL: opURL, + LocURL: locURL, + OrigURL: resp.Request.URL.String(), + Method: resp.Request.Method, + FinalState: finalState, + CurState: curState, + }, nil +} + +func (p *Poller[T]) Done() bool { + return pollers.IsTerminalState(p.CurState) +} + +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + err := pollers.PollHelper(ctx, p.OpLocURL, p.pl, func(resp *http.Response) (string, error) { + state, err := pollers.GetStatus(resp) + if err != nil { + return "", err + } else if state == "" { + return "", errors.New("the response did not contain a status") + } + p.resp = resp + p.CurState = state + return p.CurState, nil + }) + if err != nil { + return nil, err + } + return p.resp, nil +} + +func (p *Poller[T]) Result(ctx context.Context, out *T) error { + var req *exported.Request + var err error + if p.FinalState == pollers.FinalStateViaLocation && p.LocURL != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } else if p.FinalState == pollers.FinalStateViaOpLocation && p.Method == http.MethodPost { + // no final GET required, terminal response should have it + } else if rl, rlErr := pollers.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, pollers.ErrNoBody) { + return rlErr + } else if rl != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, rl) + } else if p.Method == http.MethodPatch || p.Method == http.MethodPut { + req, err = exported.NewRequest(ctx, http.MethodGet, p.OrigURL) + } else if p.Method == http.MethodPost && p.LocURL != "" { + req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL) + } + if err != nil { + return err + } + + // if a 
final GET request has been created, execute it + if req != nil { + resp, err := p.pl.Do(req) + if err != nil { + return err + } + p.resp = resp + } + + return pollers.ResultHelper(p.resp, pollers.Failed(p.CurState), out) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go new file mode 100644 index 000000000..37ed647f4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/poller.go @@ -0,0 +1,24 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package pollers + +// FinalStateVia is the enumerated type for the possible final-state-via values. +type FinalStateVia string + +const ( + // FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL. + FinalStateViaAzureAsyncOp FinalStateVia = "azure-async-operation" + + // FinalStateViaLocation indicates the final payload comes from the Location URL. + FinalStateViaLocation FinalStateVia = "location" + + // FinalStateViaOriginalURI indicates the final payload comes from the original URL. + FinalStateViaOriginalURI FinalStateVia = "original-uri" + + // FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL. + FinalStateViaOpLocation FinalStateVia = "operation-location" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go new file mode 100644 index 000000000..17ab7dadc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/util.go @@ -0,0 +1,317 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package pollers + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "reflect" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// the well-known set of LRO status/provisioning state values. +const ( + StatusSucceeded = "Succeeded" + StatusCanceled = "Canceled" + StatusFailed = "Failed" + StatusInProgress = "InProgress" +) + +// IsTerminalState returns true if the LRO's state is terminal. +func IsTerminalState(s string) bool { + return strings.EqualFold(s, StatusSucceeded) || strings.EqualFold(s, StatusFailed) || strings.EqualFold(s, StatusCanceled) +} + +// Failed returns true if the LRO's state is terminal failure. +func Failed(s string) bool { + return strings.EqualFold(s, StatusFailed) || strings.EqualFold(s, StatusCanceled) +} + +// Succeeded returns true if the LRO's state is terminal success. +func Succeeded(s string) bool { + return strings.EqualFold(s, StatusSucceeded) +} + +// returns true if the LRO response contains a valid HTTP status code +func StatusCodeValid(resp *http.Response) bool { + return exported.HasStatusCode(resp, http.StatusOK, http.StatusAccepted, http.StatusCreated, http.StatusNoContent) +} + +// IsValidURL verifies that the URL is valid and absolute. +func IsValidURL(s string) bool { + u, err := url.Parse(s) + return err == nil && u.IsAbs() +} + +// getTokenTypeName creates a type name from the type parameter T. 
+func getTokenTypeName[T any]() (string, error) { + tt := shared.TypeOfT[T]() + var n string + if tt.Kind() == reflect.Pointer { + n = "*" + tt = tt.Elem() + } + n += tt.Name() + if n == "" { + return "", errors.New("nameless types are not allowed") + } + return n, nil +} + +type resumeTokenWrapper[T any] struct { + Type string `json:"type"` + Token T `json:"token"` +} + +// NewResumeToken creates a resume token from the specified type. +// An error is returned if the generic type has no name (e.g. struct{}). +func NewResumeToken[TResult, TSource any](from TSource) (string, error) { + n, err := getTokenTypeName[TResult]() + if err != nil { + return "", err + } + b, err := json.Marshal(resumeTokenWrapper[TSource]{ + Type: n, + Token: from, + }) + if err != nil { + return "", err + } + return string(b), nil +} + +// ExtractToken returns the poller-specific token information from the provided token value. +func ExtractToken(token string) ([]byte, error) { + raw := map[string]json.RawMessage{} + if err := json.Unmarshal([]byte(token), &raw); err != nil { + return nil, err + } + // this is dependent on the type resumeTokenWrapper[T] + tk, ok := raw["token"] + if !ok { + return nil, errors.New("missing token value") + } + return tk, nil +} + +// IsTokenValid returns an error if the specified token isn't applicable for generic type T. +func IsTokenValid[T any](token string) error { + raw := map[string]interface{}{} + if err := json.Unmarshal([]byte(token), &raw); err != nil { + return err + } + t, ok := raw["type"] + if !ok { + return errors.New("missing type value") + } + tt, ok := t.(string) + if !ok { + return fmt.Errorf("invalid type format %T", t) + } + n, err := getTokenTypeName[T]() + if err != nil { + return err + } + if tt != n { + return fmt.Errorf("cannot resume from this poller token. token is for type %s, not %s", tt, n) + } + return nil +} + +// ErrNoBody is returned if the response didn't contain a body. +var ErrNoBody = errors.New("the response did not contain a body") + +// GetJSON reads the response body into a raw JSON object. +// It returns ErrNoBody if there was no content. +func GetJSON(resp *http.Response) (map[string]interface{}, error) { + body, err := exported.Payload(resp) + if err != nil { + return nil, err + } + if len(body) == 0 { + return nil, ErrNoBody + } + // unmarshall the body to get the value + var jsonBody map[string]interface{} + if err = json.Unmarshal(body, &jsonBody); err != nil { + return nil, err + } + return jsonBody, nil +} + +// provisioningState returns the provisioning state from the response or the empty string. +func provisioningState(jsonBody map[string]interface{}) string { + jsonProps, ok := jsonBody["properties"] + if !ok { + return "" + } + props, ok := jsonProps.(map[string]interface{}) + if !ok { + return "" + } + rawPs, ok := props["provisioningState"] + if !ok { + return "" + } + ps, ok := rawPs.(string) + if !ok { + return "" + } + return ps +} + +// status returns the status from the response or the empty string. +func status(jsonBody map[string]interface{}) string { + rawStatus, ok := jsonBody["status"] + if !ok { + return "" + } + status, ok := rawStatus.(string) + if !ok { + return "" + } + return status +} + +// GetStatus returns the LRO's status from the response body. +// Typically used for Azure-AsyncOperation flows. +// If there is no status in the response body the empty string is returned. 
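// Illustrative sketch (not part of the vendored SDK): a resume token produced by NewResumeToken
// above is plain JSON with a "type" discriminator and a "token" payload. The wrapper and poller
// state types below are hypothetical stand-ins used only to show the round trip.
package main

import (
	"encoding/json"
	"fmt"
)

type fakePollerState struct {
	PollURL  string `json:"pollURL"`
	CurState string `json:"state"`
}

type tokenWrapper struct {
	Type  string          `json:"type"`
	Token json.RawMessage `json:"token"`
}

func main() {
	state := fakePollerState{PollURL: "https://example.com/poll/1", CurState: "InProgress"}
	raw, _ := json.Marshal(map[string]interface{}{"type": "WidgetResponse", "token": state})

	var w tokenWrapper
	if err := json.Unmarshal(raw, &w); err != nil {
		panic(err)
	}
	// w.Token is the poller-specific portion, analogous to what ExtractToken returns
	var restored fakePollerState
	_ = json.Unmarshal(w.Token, &restored)
	fmt.Println(w.Type, restored.PollURL, restored.CurState)
}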
+func GetStatus(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + return status(jsonBody), nil +} + +// GetProvisioningState returns the LRO's state from the response body. +// If there is no state in the response body the empty string is returned. +func GetProvisioningState(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + return provisioningState(jsonBody), nil +} + +// GetResourceLocation returns the LRO's resourceLocation value from the response body. +// Typically used for Operation-Location flows. +// If there is no resourceLocation in the response body the empty string is returned. +func GetResourceLocation(resp *http.Response) (string, error) { + jsonBody, err := GetJSON(resp) + if err != nil { + return "", err + } + v, ok := jsonBody["resourceLocation"] + if !ok { + // it might be ok if the field doesn't exist, the caller must make that determination + return "", nil + } + vv, ok := v.(string) + if !ok { + return "", fmt.Errorf("the resourceLocation value %v was not in string format", v) + } + return vv, nil +} + +// used if the operation synchronously completed +type NopPoller[T any] struct { + resp *http.Response + result T +} + +// NewNopPoller creates a NopPoller from the provided response. +// It unmarshals the response body into an instance of T. +func NewNopPoller[T any](resp *http.Response) (*NopPoller[T], error) { + np := &NopPoller[T]{resp: resp} + if resp.StatusCode == http.StatusNoContent { + return np, nil + } + payload, err := exported.Payload(resp) + if err != nil { + return nil, err + } + if len(payload) == 0 { + return np, nil + } + if err = json.Unmarshal(payload, &np.result); err != nil { + return nil, err + } + return np, nil +} + +func (*NopPoller[T]) Done() bool { + return true +} + +func (p *NopPoller[T]) Poll(context.Context) (*http.Response, error) { + return p.resp, nil +} + +func (p *NopPoller[T]) Result(ctx context.Context, out *T) error { + *out = p.result + return nil +} + +// PollHelper creates and executes the request, calling update() with the response. +// If the request fails, the update func is not called. +// The update func returns the state of the operation for logging purposes or an error +// if it fails to extract the required state from the response. +func PollHelper(ctx context.Context, endpoint string, pl exported.Pipeline, update func(resp *http.Response) (string, error)) error { + req, err := exported.NewRequest(ctx, http.MethodGet, endpoint) + if err != nil { + return err + } + resp, err := pl.Do(req) + if err != nil { + return err + } + state, err := update(resp) + if err != nil { + return err + } + log.Writef(log.EventLRO, "State %s", state) + return nil +} + +// ResultHelper processes the response as success or failure. +// In the success case, it unmarshals the payload into either a new instance of T or out. +// In the failure case, it creates an *azcore.Response error from the response. +func ResultHelper[T any](resp *http.Response, failed bool, out *T) error { + // short-circuit the simple success case with no response body to unmarshal + if resp.StatusCode == http.StatusNoContent { + return nil + } + + defer resp.Body.Close() + if !StatusCodeValid(resp) || failed { + // the LRO failed. 
unmarshall the error and update state + return exported.NewResponseError(resp) + } + + // success case + payload, err := exported.Payload(resp) + if err != nil { + return err + } + if len(payload) == 0 { + return nil + } + + if err = json.Unmarshal(payload, out); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go new file mode 100644 index 000000000..1900edff7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -0,0 +1,34 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package shared + +const ( + ContentTypeAppJSON = "application/json" + ContentTypeAppXML = "application/xml" +) + +const ( + HeaderAuthorization = "Authorization" + HeaderAuxiliaryAuthorization = "x-ms-authorization-auxiliary" + HeaderAzureAsync = "Azure-AsyncOperation" + HeaderContentLength = "Content-Length" + HeaderContentType = "Content-Type" + HeaderLocation = "Location" + HeaderOperationLocation = "Operation-Location" + HeaderRetryAfter = "Retry-After" + HeaderUserAgent = "User-Agent" +) + +const BearerTokenPrefix = "Bearer " + +const ( + // Module is the name of the calling module used in telemetry data. + Module = "azcore" + + // Version is the semantic version (see http://semver.org) of this module. + Version = "v1.0.0" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go new file mode 100644 index 000000000..96eef2956 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go @@ -0,0 +1,135 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package shared + +import ( + "context" + "errors" + "io" + "net/http" + "reflect" + "strconv" + "time" +) + +// CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header. +type CtxWithHTTPHeaderKey struct{} + +// CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions. +type CtxWithRetryOptionsKey struct{} + +// CtxIncludeResponseKey is used as a context key for retrieving the raw response. +type CtxIncludeResponseKey struct{} + +// Delay waits for the duration to elapse or the context to be cancelled. +func Delay(ctx context.Context, delay time.Duration) error { + select { + case <-time.After(delay): + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// RetryAfter returns non-zero if the response contains a Retry-After header value. +func RetryAfter(resp *http.Response) time.Duration { + if resp == nil { + return 0 + } + ra := resp.Header.Get(HeaderRetryAfter) + if ra == "" { + return 0 + } + // retry-after values are expressed in either number of + // seconds or an HTTP-date indicating when to try again + if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 { + return time.Duration(retryAfter) * time.Second + } else if t, err := time.Parse(time.RFC1123, ra); err == nil { + return time.Until(t) + } + return 0 +} + +// TypeOfT returns the type of the generic type param. 
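// Illustrative sketch (not part of the vendored SDK): RetryAfter above accepts either a number
// of seconds or an HTTP-date, and Delay waits in a context-aware way. The standalone helpers
// below mirror that behavior with a hard-coded header value for demonstration.
package main

import (
	"context"
	"fmt"
	"net/http"
	"strconv"
	"time"
)

func retryAfter(resp *http.Response) time.Duration {
	ra := resp.Header.Get("Retry-After")
	if ra == "" {
		return 0
	}
	// numeric form: number of seconds to wait
	if secs, _ := strconv.Atoi(ra); secs > 0 {
		return time.Duration(secs) * time.Second
	}
	// HTTP-date form: wait until the given time
	if t, err := time.Parse(time.RFC1123, ra); err == nil {
		return time.Until(t)
	}
	return 0
}

func delay(ctx context.Context, d time.Duration) error {
	select {
	case <-time.After(d):
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	resp := &http.Response{Header: http.Header{"Retry-After": []string{"2"}}}
	fmt.Println(retryAfter(resp)) // 2s
	_ = delay(context.Background(), 10*time.Millisecond)
}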
+func TypeOfT[T any]() reflect.Type { + // you can't, at present, obtain the type of + // a type parameter, so this is the trick + return reflect.TypeOf((*T)(nil)).Elem() +} + +// BytesSetter abstracts replacing a byte slice on some type. +type BytesSetter interface { + Set(b []byte) +} + +// NewNopClosingBytesReader creates a new *NopClosingBytesReader for the specified slice. +func NewNopClosingBytesReader(data []byte) *NopClosingBytesReader { + return &NopClosingBytesReader{s: data} +} + +// NopClosingBytesReader is an io.ReadSeekCloser around a byte slice. +// It also provides direct access to the byte slice to avoid rereading. +type NopClosingBytesReader struct { + s []byte + i int64 +} + +// Bytes returns the underlying byte slice. +func (r *NopClosingBytesReader) Bytes() []byte { + return r.s +} + +// Close implements the io.Closer interface. +func (*NopClosingBytesReader) Close() error { + return nil +} + +// Read implements the io.Reader interface. +func (r *NopClosingBytesReader) Read(b []byte) (n int, err error) { + if r.i >= int64(len(r.s)) { + return 0, io.EOF + } + n = copy(b, r.s[r.i:]) + r.i += int64(n) + return +} + +// Set replaces the existing byte slice with the specified byte slice and resets the reader. +func (r *NopClosingBytesReader) Set(b []byte) { + r.s = b + r.i = 0 +} + +// Seek implements the io.Seeker interface. +func (r *NopClosingBytesReader) Seek(offset int64, whence int) (int64, error) { + var i int64 + switch whence { + case io.SeekStart: + i = offset + case io.SeekCurrent: + i = r.i + offset + case io.SeekEnd: + i = int64(len(r.s)) + offset + default: + return 0, errors.New("nopClosingBytesReader: invalid whence") + } + if i < 0 { + return 0, errors.New("nopClosingBytesReader: negative position") + } + r.i = i + return i, nil +} + +var _ BytesSetter = (*NopClosingBytesReader)(nil) + +// TransportFunc is a helper to use a first-class func to satisfy the Transporter interface. +type TransportFunc func(*http.Request) (*http.Response, error) + +// Do implements the Transporter interface for the TransportFunc type. +func (pf TransportFunc) Do(req *http.Request) (*http.Response, error) { + return pf(req) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go new file mode 100644 index 000000000..2f3901bff --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/doc.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package log contains functionality for configuring logging behavior. +// Default logging to stderr can be enabled by setting environment variable AZURE_SDK_GO_LOGGING to "all". +package log diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go new file mode 100644 index 000000000..7bde29d0a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/log/log.go @@ -0,0 +1,50 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +// Package log provides functionality for configuring logging facilities. +package log + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// Event is used to group entries. Each group can be toggled on or off. 
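// Illustrative sketch (not part of the vendored SDK): because Transporter is a single-method
// interface, a first-class function adapter (like TransportFunc above) makes it easy to stub
// the transport in tests. The adapter type, options helper, and canned response below are
// hypothetical.
package example

import (
	"bytes"
	"io/ioutil"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

type transportFunc func(*http.Request) (*http.Response, error)

// Do satisfies the Transporter interface for transportFunc.
func (tf transportFunc) Do(req *http.Request) (*http.Response, error) { return tf(req) }

func newTestOptions() *policy.ClientOptions {
	return &policy.ClientOptions{
		Transport: transportFunc(func(req *http.Request) (*http.Response, error) {
			// return a canned response instead of hitting the network
			return &http.Response{
				StatusCode: http.StatusOK,
				Header:     http.Header{},
				Body:       ioutil.NopCloser(bytes.NewReader([]byte(`{"status":"Succeeded"}`))),
				Request:    req,
			}, nil
		}),
	}
}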
+type Event = log.Event + +const ( + // EventRequest entries contain information about HTTP requests. + // This includes information like the URL, query parameters, and headers. + EventRequest Event = "Request" + + // EventResponse entries contain information about HTTP responses. + // This includes information like the HTTP status code, headers, and request URL. + EventResponse Event = "Response" + + // EventRetryPolicy entries contain information specific to the retry policy in use. + EventRetryPolicy Event = "Retry" + + // EventLRO entries contain information specific to long-running operations. + // This includes information like polling location, operation state, and sleep intervals. + EventLRO Event = "LongRunningOperation" +) + +// SetEvents is used to control which events are written to +// the log. By default all log events are writen. +// NOTE: this is not goroutine safe and should be called before using SDK clients. +func SetEvents(cls ...Event) { + log.SetEvents(cls...) +} + +// SetListener will set the Logger to write to the specified Listener. +// NOTE: this is not goroutine safe and should be called before using SDK clients. +func SetListener(lst func(Event, string)) { + log.SetListener(lst) +} + +// for testing purposes +func resetEvents() { + log.TestResetEvents() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go new file mode 100644 index 000000000..fad2579ed --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/doc.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package policy contains the definitions needed for configuring in-box pipeline policies +// and creating custom policies. +package policy diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go new file mode 100644 index 000000000..bfc71e9a0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -0,0 +1,119 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package policy + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" +) + +// Policy represents an extensibility point for the Pipeline that can mutate the specified +// Request and react to the received Response. +type Policy = exported.Policy + +// Transporter represents an HTTP pipeline transport used to send HTTP requests and receive responses. +type Transporter = exported.Transporter + +// Request is an abstraction over the creation of an HTTP request as it passes through the pipeline. +// Don't use this type directly, use runtime.NewRequest() instead. +type Request = exported.Request + +// ClientOptions contains optional settings for a client's pipeline. +// All zero-value fields will be initialized with default values. +type ClientOptions struct { + // Cloud specifies a cloud for the client. The default is Azure Public Cloud. + Cloud cloud.Configuration + + // Logging configures the built-in logging policy. + Logging LogOptions + + // Retry configures the built-in retry policy. 
+ Retry RetryOptions + + // Telemetry configures the built-in telemetry policy. + Telemetry TelemetryOptions + + // Transport sets the transport for HTTP requests. + Transport Transporter + + // PerCallPolicies contains custom policies to inject into the pipeline. + // Each policy is executed once per request. + PerCallPolicies []Policy + + // PerRetryPolicies contains custom policies to inject into the pipeline. + // Each policy is executed once per request, and for each retry of that request. + PerRetryPolicies []Policy +} + +// LogOptions configures the logging policy's behavior. +type LogOptions struct { + // IncludeBody indicates if request and response bodies should be included in logging. + // The default value is false. + // NOTE: enabling this can lead to disclosure of sensitive information, use with care. + IncludeBody bool + + // AllowedHeaders is the slice of headers to log with their values intact. + // All headers not in the slice will have their values REDACTED. + // Applies to request and response headers. + AllowedHeaders []string + + // AllowedQueryParams is the slice of query parameters to log with their values intact. + // All query parameters not in the slice will have their values REDACTED. + AllowedQueryParams []string +} + +// RetryOptions configures the retry policy's behavior. +// Call NewRetryOptions() to create an instance with default values. +type RetryOptions struct { + // MaxRetries specifies the maximum number of attempts a failed operation will be retried + // before producing an error. + // The default value is three. A value less than zero means one try and no retries. + MaxRetries int32 + + // TryTimeout indicates the maximum time allowed for any single try of an HTTP request. + // This is disabled by default. Specify a value greater than zero to enable. + // NOTE: Setting this to a small value might cause premature HTTP request time-outs. + TryTimeout time.Duration + + // RetryDelay specifies the initial amount of delay to use before retrying an operation. + // The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay. + // The default value is four seconds. A value less than zero means no delay between retries. + RetryDelay time.Duration + + // MaxRetryDelay specifies the maximum delay allowed before retrying an operation. + // Typically the value is greater than or equal to the value specified in RetryDelay. + // The default Value is 120 seconds. A value less than zero means there is no cap. + MaxRetryDelay time.Duration + + // StatusCodes specifies the HTTP status codes that indicate the operation should be retried. + // The default value is the status codes in StatusCodesForRetry. + // Specifying an empty slice will cause retries to happen only for transport errors. + StatusCodes []int +} + +// TelemetryOptions configures the telemetry policy's behavior. +type TelemetryOptions struct { + // ApplicationID is an application-specific identification string to add to the User-Agent. + // It has a maximum length of 24 characters and must not contain any spaces. + ApplicationID string + + // Disabled will prevent the addition of any telemetry data to the User-Agent. + Disabled bool +} + +// TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token. +type TokenRequestOptions struct { + // Scopes contains the list of permission scopes required for the token. + Scopes []string +} + +// BearerTokenOptions configures the bearer token policy's behavior. 
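// Illustrative sketch (not part of the vendored SDK): the zero value of ClientOptions is usable
// as-is, but the built-in retry, logging, and telemetry policies can be tuned per client. All
// values below are arbitrary examples, not recommended defaults.
package example

import (
	"net/http"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
)

func exampleClientOptions() policy.ClientOptions {
	return policy.ClientOptions{
		Retry: policy.RetryOptions{
			MaxRetries:    5,
			RetryDelay:    2 * time.Second,
			MaxRetryDelay: 60 * time.Second,
			StatusCodes:   []int{http.StatusTooManyRequests, http.StatusServiceUnavailable},
		},
		Logging: policy.LogOptions{
			IncludeBody:    false,
			AllowedHeaders: []string{"x-ms-my-custom-header"},
		},
		Telemetry: policy.TelemetryOptions{
			ApplicationID: "myapp/1.0.0",
		},
	}
}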
+type BearerTokenOptions struct { + // placeholder for future options +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go new file mode 100644 index 000000000..c9cfa438c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/doc.go @@ -0,0 +1,10 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package runtime contains various facilities for creating requests and handling responses. +// The content is intended for SDK authors. +package runtime diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go new file mode 100644 index 000000000..6d03b291e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/errors.go @@ -0,0 +1,19 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" +) + +// NewResponseError creates an *azcore.ResponseError from the provided HTTP response. +// Call this when a service request returns a non-successful status code. +func NewResponseError(resp *http.Response) error { + return exported.NewResponseError(resp) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go new file mode 100644 index 000000000..5507665d6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go @@ -0,0 +1,77 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "encoding/json" + "errors" +) + +// PagingHandler contains the required data for constructing a Pager. +type PagingHandler[T any] struct { + // More returns a boolean indicating if there are more pages to fetch. + // It uses the provided page to make the determination. + More func(T) bool + + // Fetcher fetches the first and subsequent pages. + Fetcher func(context.Context, *T) (T, error) +} + +// Pager provides operations for iterating over paged responses. +type Pager[T any] struct { + current *T + handler PagingHandler[T] + firstPage bool +} + +// NewPager creates an instance of Pager using the specified PagingHandler. +// Pass a non-nil T for firstPage if the first page has already been retrieved. +func NewPager[T any](handler PagingHandler[T]) *Pager[T] { + return &Pager[T]{ + handler: handler, + firstPage: true, + } +} + +// More returns true if there are more pages to retrieve. +func (p *Pager[T]) More() bool { + if p.current != nil { + return p.handler.More(*p.current) + } + return true +} + +// NextPage advances the pager to the next page. 
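// Illustrative sketch (not part of the vendored SDK): NewPager above is driven entirely by a
// PagingHandler, and NextPage (defined just below) advances through the pages it fetches. The
// page type and canned page data here are hypothetical.
package main

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

type widgetPage struct {
	Items    []string
	NextLink string
}

func main() {
	pages := []widgetPage{
		{Items: []string{"a", "b"}, NextLink: "page2"},
		{Items: []string{"c"}, NextLink: ""},
	}
	i := 0
	pager := runtime.NewPager(runtime.PagingHandler[widgetPage]{
		More: func(p widgetPage) bool { return p.NextLink != "" },
		Fetcher: func(ctx context.Context, _ *widgetPage) (widgetPage, error) {
			// a real Fetcher would issue an HTTP request using the previous page's NextLink
			p := pages[i]
			i++
			return p, nil
		},
	})
	for pager.More() {
		page, err := pager.NextPage(context.Background())
		if err != nil {
			panic(err)
		}
		fmt.Println(page.Items)
	}
}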
+func (p *Pager[T]) NextPage(ctx context.Context) (T, error) { + var resp T + var err error + if p.current != nil { + if p.firstPage { + // we get here if it's an LRO-pager, we already have the first page + p.firstPage = false + return *p.current, nil + } else if !p.handler.More(*p.current) { + return *new(T), errors.New("no more pages") + } + resp, err = p.handler.Fetcher(ctx, p.current) + } else { + // non-LRO case, first page + p.firstPage = false + resp, err = p.handler.Fetcher(ctx, nil) + } + if err != nil { + return *new(T), err + } + p.current = &resp + return *p.current, nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface for Pager[T]. +func (p *Pager[T]) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, &p.current) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go new file mode 100644 index 000000000..ad75ae2ab --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pipeline.go @@ -0,0 +1,73 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// PipelineOptions contains Pipeline options for SDK developers +type PipelineOptions struct { + AllowedHeaders, AllowedQueryParameters []string + PerCall, PerRetry []policy.Policy +} + +// Pipeline represents a primitive for sending HTTP requests and receiving responses. +// Its behavior can be extended by specifying policies during construction. +type Pipeline = exported.Pipeline + +// NewPipeline creates a pipeline from connection options, with any additional policies as specified. +// Policies from ClientOptions are placed after policies from PipelineOptions. +// The module and version parameters are used by the telemetry policy, when enabled. +func NewPipeline(module, version string, plOpts PipelineOptions, options *policy.ClientOptions) Pipeline { + cp := policy.ClientOptions{} + if options != nil { + cp = *options + } + if len(plOpts.AllowedHeaders) > 0 { + headers := make([]string, 0, len(plOpts.AllowedHeaders)+len(cp.Logging.AllowedHeaders)) + copy(headers, plOpts.AllowedHeaders) + headers = append(headers, cp.Logging.AllowedHeaders...) + cp.Logging.AllowedHeaders = headers + } + if len(plOpts.AllowedQueryParameters) > 0 { + qp := make([]string, 0, len(plOpts.AllowedQueryParameters)+len(cp.Logging.AllowedQueryParams)) + copy(qp, plOpts.AllowedQueryParameters) + qp = append(qp, cp.Logging.AllowedQueryParams...) + cp.Logging.AllowedQueryParams = qp + } + // we put the includeResponsePolicy at the very beginning so that the raw response + // is populated with the final response (some policies might mutate the response) + policies := []policy.Policy{policyFunc(includeResponsePolicy)} + if !cp.Telemetry.Disabled { + policies = append(policies, NewTelemetryPolicy(module, version, &cp.Telemetry)) + } + policies = append(policies, plOpts.PerCall...) + policies = append(policies, cp.PerCallPolicies...) + policies = append(policies, NewRetryPolicy(&cp.Retry)) + policies = append(policies, plOpts.PerRetry...) + policies = append(policies, cp.PerRetryPolicies...) 
+ policies = append(policies, NewLogPolicy(&cp.Logging)) + policies = append(policies, policyFunc(httpHeaderPolicy), policyFunc(bodyDownloadPolicy)) + transport := cp.Transport + if transport == nil { + transport = defaultHTTPClient + } + return exported.NewPipeline(transport, policies...) +} + +// policyFunc is a type that implements the Policy interface. +// Use this type when implementing a stateless policy as a first-class function. +type policyFunc func(*policy.Request) (*http.Response, error) + +// Do implements the Policy interface on policyFunc. +func (pf policyFunc) Do(req *policy.Request) (*http.Response, error) { + return pf(req) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go new file mode 100644 index 000000000..71e3062be --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go @@ -0,0 +1,64 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/temporal" +) + +// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential. +type BearerTokenPolicy struct { + // mainResource is the resource to be retreived using the tenant specified in the credential + mainResource *temporal.Resource[azcore.AccessToken, acquiringResourceState] + // the following fields are read-only + cred azcore.TokenCredential + scopes []string +} + +type acquiringResourceState struct { + req *policy.Request + p *BearerTokenPolicy +} + +// acquire acquires or updates the resource; only one +// thread/goroutine at a time ever calls this function +func acquire(state acquiringResourceState) (newResource azcore.AccessToken, newExpiration time.Time, err error) { + tk, err := state.p.cred.GetToken(state.req.Raw().Context(), policy.TokenRequestOptions{Scopes: state.p.scopes}) + if err != nil { + return azcore.AccessToken{}, time.Time{}, err + } + return tk, tk.ExpiresOn, nil +} + +// NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens. +// cred: an azcore.TokenCredential implementation such as a credential object from azidentity +// scopes: the list of permission scopes required for the token. +// opts: optional settings. Pass nil to accept default values; this is the same as passing a zero-value options. 
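// Illustrative sketch (not part of the vendored SDK): SDK clients build a Pipeline once with
// NewPipeline above and reuse it for every request. NewRequest comes from this module's
// runtime/request.go (not shown in this hunk), and the module name, version, and endpoint are
// placeholders.
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

func main() {
	pl := runtime.NewPipeline("azexample", "v0.1.0", runtime.PipelineOptions{}, &policy.ClientOptions{})
	req, err := runtime.NewRequest(context.Background(), http.MethodGet, "https://example.com/")
	if err != nil {
		panic(err)
	}
	resp, err := pl.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}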
+func NewBearerTokenPolicy(cred azcore.TokenCredential, scopes []string, opts *policy.BearerTokenOptions) *BearerTokenPolicy { + return &BearerTokenPolicy{ + cred: cred, + scopes: scopes, + mainResource: temporal.NewResource(acquire), + } +} + +// Do authorizes a request with a bearer token +func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { + as := acquiringResourceState{ + p: b, + req: req, + } + tk, err := b.mainResource.Get(as) + if err != nil { + return nil, err + } + req.Raw().Header.Set(shared.HeaderAuthorization, shared.BearerTokenPrefix+tk.Token) + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go new file mode 100644 index 000000000..02d621ee8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_body_download.go @@ -0,0 +1,73 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "fmt" + "net/http" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" +) + +// bodyDownloadPolicy creates a policy object that downloads the response's body to a []byte. +func bodyDownloadPolicy(req *policy.Request) (*http.Response, error) { + resp, err := req.Next() + if err != nil { + return resp, err + } + var opValues bodyDownloadPolicyOpValues + // don't skip downloading error response bodies + if req.OperationValue(&opValues); opValues.Skip && resp.StatusCode < 400 { + return resp, err + } + // Either bodyDownloadPolicyOpValues was not specified (so skip is false) + // or it was specified and skip is false: don't skip downloading the body + _, err = exported.Payload(resp) + if err != nil { + return resp, newBodyDownloadError(err, req) + } + return resp, err +} + +// bodyDownloadPolicyOpValues is the struct containing the per-operation values +type bodyDownloadPolicyOpValues struct { + Skip bool +} + +type bodyDownloadError struct { + err error +} + +func newBodyDownloadError(err error, req *policy.Request) error { + // on failure, only retry the request for idempotent operations. + // we currently identify them as DELETE, GET, and PUT requests. + if m := strings.ToUpper(req.Raw().Method); m == http.MethodDelete || m == http.MethodGet || m == http.MethodPut { + // error is safe for retry + return err + } + // wrap error to avoid retries + return &bodyDownloadError{ + err: err, + } +} + +func (b *bodyDownloadError) Error() string { + return fmt.Sprintf("body download policy: %s", b.err.Error()) +} + +func (b *bodyDownloadError) NonRetriable() { + // marker method +} + +func (b *bodyDownloadError) Unwrap() error { + return b.err +} + +var _ errorinfo.NonRetriable = (*bodyDownloadError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go new file mode 100644 index 000000000..770e0a2b6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go @@ -0,0 +1,39 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
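// Illustrative sketch (not part of the vendored SDK): a bearer-token policy built with
// NewBearerTokenPolicy above is typically injected per retry so every attempt carries a fresh
// Authorization header. The azidentity credential and the storage scope below are assumptions,
// not part of this patch.
package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func newAuthorizedOptions() (*policy.ClientOptions, error) {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		return nil, err
	}
	authPolicy := runtime.NewBearerTokenPolicy(cred, []string{"https://storage.azure.com/.default"}, nil)
	return &policy.ClientOptions{
		// run the auth policy on every retry so the token is refreshed as needed
		PerRetryPolicies: []policy.Policy{authPolicy},
	}, nil
}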
+ +package runtime + +import ( + "context" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// newHTTPHeaderPolicy creates a policy object that adds custom HTTP headers to a request +func httpHeaderPolicy(req *policy.Request) (*http.Response, error) { + // check if any custom HTTP headers have been specified + if header := req.Raw().Context().Value(shared.CtxWithHTTPHeaderKey{}); header != nil { + for k, v := range header.(http.Header) { + // use Set to replace any existing value + // it also canonicalizes the header key + req.Raw().Header.Set(k, v[0]) + // add any remaining values + for i := 1; i < len(v); i++ { + req.Raw().Header.Add(k, v[i]) + } + } + } + return req.Next() +} + +// WithHTTPHeader adds the specified http.Header to the parent context. +// Use this to specify custom HTTP headers at the API-call level. +// Any overlapping headers will have their values replaced with the values specified here. +func WithHTTPHeader(parent context.Context, header http.Header) context.Context { + return context.WithValue(parent, shared.CtxWithHTTPHeaderKey{}, header) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go new file mode 100644 index 000000000..4714baa30 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go @@ -0,0 +1,34 @@ +//go:build go1.16 +// +build go1.16 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// includeResponsePolicy creates a policy that retrieves the raw HTTP response upon request +func includeResponsePolicy(req *policy.Request) (*http.Response, error) { + resp, err := req.Next() + if resp == nil { + return resp, err + } + if httpOutRaw := req.Raw().Context().Value(shared.CtxIncludeResponseKey{}); httpOutRaw != nil { + httpOut := httpOutRaw.(**http.Response) + *httpOut = resp + } + return resp, err +} + +// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context. +// The resp parameter will contain the HTTP response after the request has completed. +func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context { + return context.WithValue(parent, shared.CtxIncludeResponseKey{}, resp) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go new file mode 100644 index 000000000..faf175e3f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_logging.go @@ -0,0 +1,251 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
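// Illustrative sketch (not part of the vendored SDK): WithHTTPHeader and WithCaptureResponse
// above are applied per API call through the context. The header value and helper shape below
// are arbitrary examples.
package example

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

func decorateContext(parent context.Context) (context.Context, **http.Response) {
	// send an extra header with this call only
	hdr := http.Header{}
	hdr.Set("x-ms-client-request-id", "00000000-0000-0000-0000-000000000000")
	ctx := runtime.WithHTTPHeader(parent, hdr)

	// capture the raw *http.Response once the call completes
	var raw *http.Response
	ctx = runtime.WithCaptureResponse(ctx, &raw)
	return ctx, &raw
}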
+ +package runtime + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "sort" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/diag" +) + +type logPolicy struct { + includeBody bool + allowedHeaders map[string]struct{} + allowedQP map[string]struct{} +} + +// NewLogPolicy creates a request/response logging policy object configured using the specified options. +// Pass nil to accept the default values; this is the same as passing a zero-value options. +func NewLogPolicy(o *policy.LogOptions) policy.Policy { + if o == nil { + o = &policy.LogOptions{} + } + // construct default hash set of allowed headers + allowedHeaders := map[string]struct{}{ + "accept": {}, + "cache-control": {}, + "connection": {}, + "content-length": {}, + "content-type": {}, + "date": {}, + "etag": {}, + "expires": {}, + "if-match": {}, + "if-modified-since": {}, + "if-none-match": {}, + "if-unmodified-since": {}, + "last-modified": {}, + "ms-cv": {}, + "pragma": {}, + "request-id": {}, + "retry-after": {}, + "server": {}, + "traceparent": {}, + "transfer-encoding": {}, + "user-agent": {}, + "www-authenticate": {}, + "x-ms-request-id": {}, + "x-ms-client-request-id": {}, + "x-ms-return-client-request-id": {}, + } + // add any caller-specified allowed headers to the set + for _, ah := range o.AllowedHeaders { + allowedHeaders[strings.ToLower(ah)] = struct{}{} + } + // now do the same thing for query params + allowedQP := map[string]struct{}{ + "api-version": {}, + } + for _, qp := range o.AllowedQueryParams { + allowedQP[strings.ToLower(qp)] = struct{}{} + } + return &logPolicy{ + includeBody: o.IncludeBody, + allowedHeaders: allowedHeaders, + allowedQP: allowedQP, + } +} + +// logPolicyOpValues is the struct containing the per-operation values +type logPolicyOpValues struct { + try int32 + start time.Time +} + +func (p *logPolicy) Do(req *policy.Request) (*http.Response, error) { + // Get the per-operation values. These are saved in the Message's map so that they persist across each retry calling into this policy object. + var opValues logPolicyOpValues + if req.OperationValue(&opValues); opValues.start.IsZero() { + opValues.start = time.Now() // If this is the 1st try, record this operation's start time + } + opValues.try++ // The first try is #1 (not #0) + req.SetOperationValue(opValues) + + // Log the outgoing request as informational + if log.Should(log.EventRequest) { + b := &bytes.Buffer{} + fmt.Fprintf(b, "==> OUTGOING REQUEST (Try=%d)\n", opValues.try) + p.writeRequestWithResponse(b, req, nil, nil) + var err error + if p.includeBody { + err = writeReqBody(req, b) + } + log.Write(log.EventRequest, b.String()) + if err != nil { + return nil, err + } + } + + // Set the time for this particular retry operation and then Do the operation. 
+ tryStart := time.Now() + response, err := req.Next() // Make the request + tryEnd := time.Now() + tryDuration := tryEnd.Sub(tryStart) + opDuration := tryEnd.Sub(opValues.start) + + if log.Should(log.EventResponse) { + // We're going to log this; build the string to log + b := &bytes.Buffer{} + fmt.Fprintf(b, "==> REQUEST/RESPONSE (Try=%d/%v, OpTime=%v) -- ", opValues.try, tryDuration, opDuration) + if err != nil { // This HTTP request did not get a response from the service + fmt.Fprint(b, "REQUEST ERROR\n") + } else { + fmt.Fprint(b, "RESPONSE RECEIVED\n") + } + + p.writeRequestWithResponse(b, req, response, err) + if err != nil { + // skip frames runtime.Callers() and runtime.StackTrace() + b.WriteString(diag.StackTrace(2, 32)) + } else if p.includeBody { + err = writeRespBody(response, b) + } + log.Write(log.EventResponse, b.String()) + } + return response, err +} + +const redactedValue = "REDACTED" + +// writeRequestWithResponse appends a formatted HTTP request into a Buffer. If request and/or err are +// not nil, then these are also written into the Buffer. +func (p *logPolicy) writeRequestWithResponse(b *bytes.Buffer, req *policy.Request, resp *http.Response, err error) { + // redact applicable query params + cpURL := *req.Raw().URL + qp := cpURL.Query() + for k := range qp { + if _, ok := p.allowedQP[strings.ToLower(k)]; !ok { + qp.Set(k, redactedValue) + } + } + cpURL.RawQuery = qp.Encode() + // Write the request into the buffer. + fmt.Fprint(b, " "+req.Raw().Method+" "+cpURL.String()+"\n") + p.writeHeader(b, req.Raw().Header) + if resp != nil { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprint(b, " RESPONSE Status: "+resp.Status+"\n") + p.writeHeader(b, resp.Header) + } + if err != nil { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprint(b, " ERROR:\n"+err.Error()+"\n") + } +} + +// formatHeaders appends an HTTP request's or response's header into a Buffer. +func (p *logPolicy) writeHeader(b *bytes.Buffer, header http.Header) { + if len(header) == 0 { + b.WriteString(" (no headers)\n") + return + } + keys := make([]string, 0, len(header)) + // Alphabetize the headers + for k := range header { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + value := header.Get(k) + // redact all header values not in the allow-list + if _, ok := p.allowedHeaders[strings.ToLower(k)]; !ok { + value = redactedValue + } + fmt.Fprintf(b, " %s: %+v\n", k, value) + } +} + +// returns true if the request/response body should be logged. +// this is determined by looking at the content-type header value. 
+func shouldLogBody(b *bytes.Buffer, contentType string) bool { + contentType = strings.ToLower(contentType) + if strings.HasPrefix(contentType, "text") || + strings.Contains(contentType, "json") || + strings.Contains(contentType, "xml") { + return true + } + fmt.Fprintf(b, " Skip logging body for %s\n", contentType) + return false +} + +// writes to a buffer, used for logging purposes +func writeReqBody(req *policy.Request, b *bytes.Buffer) error { + if req.Raw().Body == nil { + fmt.Fprint(b, " Request contained no body\n") + return nil + } + if ct := req.Raw().Header.Get(shared.HeaderContentType); !shouldLogBody(b, ct) { + return nil + } + body, err := ioutil.ReadAll(req.Raw().Body) + if err != nil { + fmt.Fprintf(b, " Failed to read request body: %s\n", err.Error()) + return err + } + if err := req.RewindBody(); err != nil { + return err + } + logBody(b, body) + return nil +} + +// writes to a buffer, used for logging purposes +func writeRespBody(resp *http.Response, b *bytes.Buffer) error { + ct := resp.Header.Get(shared.HeaderContentType) + if ct == "" { + fmt.Fprint(b, " Response contained no body\n") + return nil + } else if !shouldLogBody(b, ct) { + return nil + } + body, err := Payload(resp) + if err != nil { + fmt.Fprintf(b, " Failed to read response body: %s\n", err.Error()) + return err + } + if len(body) > 0 { + logBody(b, body) + } else { + fmt.Fprint(b, " Response contained no body\n") + } + return nil +} + +func logBody(b *bytes.Buffer, body []byte) { + fmt.Fprintln(b, " --------------------------------------------------------------------------------") + fmt.Fprintln(b, string(body)) + fmt.Fprintln(b, " --------------------------------------------------------------------------------") +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go new file mode 100644 index 000000000..db70955b2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_request_id.go @@ -0,0 +1,34 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" +) + +type requestIDPolicy struct{} + +// NewRequestIDPolicy returns a policy that add the x-ms-client-request-id header +func NewRequestIDPolicy() policy.Policy { + return &requestIDPolicy{} +} + +func (r *requestIDPolicy) Do(req *policy.Request) (*http.Response, error) { + const requestIdHeader = "x-ms-client-request-id" + if req.Raw().Header.Get(requestIdHeader) == "" { + id, err := uuid.New() + if err != nil { + return nil, err + } + req.Raw().Header.Set(requestIdHeader, id.String()) + } + + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go new file mode 100644 index 000000000..9d630e471 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go @@ -0,0 +1,242 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
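// Illustrative sketch (not part of the vendored SDK): the calcDelay helper in the retry policy
// below implements capped exponential backoff with jitter, roughly ((2^try)-1) * RetryDelay
// scaled by a random factor in [0.8, 1.3) and clamped to MaxRetryDelay. A standalone equivalent:
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func backoff(try int32, retryDelay, maxDelay time.Duration) time.Duration {
	// (2^try - 1) * base delay
	d := time.Duration((int64(1)<<try)-1) * retryDelay
	// jitter: scale by a random factor in [0.8, 1.3)
	d = time.Duration(d.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second))
	if d > maxDelay {
		d = maxDelay
	}
	return d
}

func main() {
	for try := int32(1); try <= 4; try++ {
		fmt.Println(try, backoff(try, 4*time.Second, 120*time.Second))
	}
}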
+ +package runtime + +import ( + "context" + "errors" + "io" + "math" + "math/rand" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" +) + +const ( + defaultMaxRetries = 3 +) + +func setDefaults(o *policy.RetryOptions) { + if o.MaxRetries == 0 { + o.MaxRetries = defaultMaxRetries + } else if o.MaxRetries < 0 { + o.MaxRetries = 0 + } + if o.MaxRetryDelay == 0 { + o.MaxRetryDelay = 120 * time.Second + } else if o.MaxRetryDelay < 0 { + // not really an unlimited cap, but sufficiently large enough to be considered as such + o.MaxRetryDelay = math.MaxInt64 + } + if o.RetryDelay == 0 { + o.RetryDelay = 4 * time.Second + } else if o.RetryDelay < 0 { + o.RetryDelay = 0 + } + if o.StatusCodes == nil { + o.StatusCodes = []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } + } +} + +func calcDelay(o policy.RetryOptions, try int32) time.Duration { // try is >=1; never 0 + pow := func(number int64, exponent int32) int64 { // pow is nested helper function + var result int64 = 1 + for n := int32(0); n < exponent; n++ { + result *= number + } + return result + } + + delay := time.Duration(pow(2, try)-1) * o.RetryDelay + + // Introduce some jitter: [0.0, 1.0) / 2 = [0.0, 0.5) + 0.8 = [0.8, 1.3) + delay = time.Duration(delay.Seconds() * (rand.Float64()/2 + 0.8) * float64(time.Second)) // NOTE: We want math/rand; not crypto/rand + if delay > o.MaxRetryDelay { + delay = o.MaxRetryDelay + } + return delay +} + +// NewRetryPolicy creates a policy object configured using the specified options. +// Pass nil to accept the default values; this is the same as passing a zero-value options. +func NewRetryPolicy(o *policy.RetryOptions) policy.Policy { + if o == nil { + o = &policy.RetryOptions{} + } + p := &retryPolicy{options: *o} + return p +} + +type retryPolicy struct { + options policy.RetryOptions +} + +func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) { + options := p.options + // check if the retry options have been overridden for this call + if override := req.Raw().Context().Value(shared.CtxWithRetryOptionsKey{}); override != nil { + options = override.(policy.RetryOptions) + } + setDefaults(&options) + // Exponential retry algorithm: ((2 ^ attempt) - 1) * delay * random(0.8, 1.2) + // When to retry: connection failure or temporary/timeout. + var rwbody *retryableRequestBody + if req.Body() != nil { + // wrap the body so we control when it's actually closed. + // do this outside the for loop so defers don't accumulate. + rwbody = &retryableRequestBody{body: req.Body()} + defer rwbody.realClose() + } + try := int32(1) + for { + resp = nil // reset + log.Writef(log.EventRetryPolicy, "\n=====> Try=%d %s %s", try, req.Raw().Method, req.Raw().URL.String()) + + // For each try, seek to the beginning of the Body stream. We do this even for the 1st try because + // the stream may not be at offset 0 when we first get it and we want the same behavior for the + // 1st try as for additional tries. 
+ err = req.RewindBody() + if err != nil { + return + } + // RewindBody() restores Raw().Body to its original state, so set our rewindable after + if rwbody != nil { + req.Raw().Body = rwbody + } + + if options.TryTimeout == 0 { + resp, err = req.Next() + } else { + // Set the per-try time for this particular retry operation and then Do the operation. + tryCtx, tryCancel := context.WithTimeout(req.Raw().Context(), options.TryTimeout) + clone := req.Clone(tryCtx) + resp, err = clone.Next() // Make the request + // if the body was already downloaded or there was an error it's safe to cancel the context now + if err != nil { + tryCancel() + } else if _, ok := resp.Body.(*shared.NopClosingBytesReader); ok { + tryCancel() + } else { + // must cancel the context after the body has been read and closed + resp.Body = &contextCancelReadCloser{cf: tryCancel, body: resp.Body} + } + } + if err == nil { + log.Writef(log.EventRetryPolicy, "response %d", resp.StatusCode) + } else { + log.Writef(log.EventRetryPolicy, "error %v", err) + } + + if err == nil && !HasStatusCode(resp, options.StatusCodes...) { + // if there is no error and the response code isn't in the list of retry codes then we're done. + return + } else if ctxErr := req.Raw().Context().Err(); ctxErr != nil { + // don't retry if the parent context has been cancelled or its deadline exceeded + err = ctxErr + log.Writef(log.EventRetryPolicy, "abort due to %v", err) + return + } + + // check if the error is not retriable + var nre errorinfo.NonRetriable + if errors.As(err, &nre) { + // the error says it's not retriable so don't retry + log.Writef(log.EventRetryPolicy, "non-retriable error %T", nre) + return + } + + if try == options.MaxRetries+1 { + // max number of tries has been reached, don't sleep again + log.Writef(log.EventRetryPolicy, "MaxRetries %d exceeded", options.MaxRetries) + return + } + + // drain before retrying so nothing is leaked + Drain(resp) + + // use the delay from retry-after if available + delay := shared.RetryAfter(resp) + if delay <= 0 { + delay = calcDelay(options, try) + } + log.Writef(log.EventRetryPolicy, "End Try #%d, Delay=%v", try, delay) + select { + case <-time.After(delay): + try++ + case <-req.Raw().Context().Done(): + err = req.Raw().Context().Err() + log.Writef(log.EventRetryPolicy, "abort due to %v", err) + return + } + } +} + +// WithRetryOptions adds the specified RetryOptions to the parent context. +// Use this to specify custom RetryOptions at the API-call level. +func WithRetryOptions(parent context.Context, options policy.RetryOptions) context.Context { + return context.WithValue(parent, shared.CtxWithRetryOptionsKey{}, options) +} + +// ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser) + +// This struct is used when sending a body to the network +type retryableRequestBody struct { + body io.ReadSeeker // Seeking is required to support retries +} + +// Read reads a block of data from an inner stream and reports progress +func (b *retryableRequestBody) Read(p []byte) (n int, err error) { + return b.body.Read(p) +} + +func (b *retryableRequestBody) Seek(offset int64, whence int) (offsetFromStart int64, err error) { + return b.body.Seek(offset, whence) +} + +func (b *retryableRequestBody) Close() error { + // We don't want the underlying transport to close the request body on transient failures so this is a nop. + // The retry policy closes the request body upon success. 
+ return nil +} + +func (b *retryableRequestBody) realClose() error { + if c, ok := b.body.(io.Closer); ok { + return c.Close() + } + return nil +} + +// ********** The following type/methods implement the contextCancelReadCloser + +// contextCancelReadCloser combines an io.ReadCloser with a cancel func. +// it ensures the cancel func is invoked once the body has been read and closed. +type contextCancelReadCloser struct { + cf context.CancelFunc + body io.ReadCloser +} + +func (rc *contextCancelReadCloser) Read(p []byte) (n int, err error) { + return rc.body.Read(p) +} + +func (rc *contextCancelReadCloser) Close() error { + err := rc.body.Close() + rc.cf() + return err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go new file mode 100644 index 000000000..2abcdc576 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_telemetry.go @@ -0,0 +1,79 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "fmt" + "net/http" + "os" + "runtime" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +type telemetryPolicy struct { + telemetryValue string +} + +// NewTelemetryPolicy creates a telemetry policy object that adds telemetry information to outgoing HTTP requests. +// The format is [ ]azsdk-go-/ . +// Pass nil to accept the default values; this is the same as passing a zero-value options. +func NewTelemetryPolicy(mod, ver string, o *policy.TelemetryOptions) policy.Policy { + if o == nil { + o = &policy.TelemetryOptions{} + } + tp := telemetryPolicy{} + if o.Disabled { + return &tp + } + b := &bytes.Buffer{} + // normalize ApplicationID + if o.ApplicationID != "" { + o.ApplicationID = strings.ReplaceAll(o.ApplicationID, " ", "/") + if len(o.ApplicationID) > 24 { + o.ApplicationID = o.ApplicationID[:24] + } + b.WriteString(o.ApplicationID) + b.WriteRune(' ') + } + b.WriteString(formatTelemetry(mod, ver)) + b.WriteRune(' ') + b.WriteString(platformInfo) + tp.telemetryValue = b.String() + return &tp +} + +func formatTelemetry(comp, ver string) string { + return fmt.Sprintf("azsdk-go-%s/%s", comp, ver) +} + +func (p telemetryPolicy) Do(req *policy.Request) (*http.Response, error) { + if p.telemetryValue == "" { + return req.Next() + } + // preserve the existing User-Agent string + if ua := req.Raw().Header.Get(shared.HeaderUserAgent); ua != "" { + p.telemetryValue = fmt.Sprintf("%s %s", p.telemetryValue, ua) + } + req.Raw().Header.Set(shared.HeaderUserAgent, p.telemetryValue) + return req.Next() +} + +// NOTE: the ONLY function that should write to this variable is this func +var platformInfo = func() string { + operatingSystem := runtime.GOOS // Default OS string + switch operatingSystem { + case "windows": + operatingSystem = os.Getenv("OS") // Get more specific OS information + case "linux": // accept default OS info + case "freebsd": // accept default OS info + } + return fmt.Sprintf("(%s; %s)", runtime.Version(), operatingSystem) +}() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go new file mode 100644 index 000000000..462468424 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go 
@@ -0,0 +1,324 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// FinalStateVia is the enumerated type for the possible final-state-via values. +type FinalStateVia = pollers.FinalStateVia + +const ( + // FinalStateViaAzureAsyncOp indicates the final payload comes from the Azure-AsyncOperation URL. + FinalStateViaAzureAsyncOp = pollers.FinalStateViaAzureAsyncOp + + // FinalStateViaLocation indicates the final payload comes from the Location URL. + FinalStateViaLocation = pollers.FinalStateViaLocation + + // FinalStateViaOriginalURI indicates the final payload comes from the original URL. + FinalStateViaOriginalURI = pollers.FinalStateViaOriginalURI + + // FinalStateViaOpLocation indicates the final payload comes from the Operation-Location URL. + FinalStateViaOpLocation = pollers.FinalStateViaOpLocation +) + +// NewPollerOptions contains the optional parameters for NewPoller. +type NewPollerOptions[T any] struct { + // FinalStateVia contains the final-state-via value for the LRO. + FinalStateVia FinalStateVia + + // Response contains a preconstructed response type. + // The final payload will be unmarshaled into it and returned. + Response *T + + // Handler[T] contains a custom polling implementation. + Handler PollingHandler[T] +} + +// NewPoller creates a Poller based on the provided initial response. +func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPollerOptions[T]) (*Poller[T], error) { + if options == nil { + options = &NewPollerOptions[T]{} + } + result := options.Response + if result == nil { + result = new(T) + } + if options.Handler != nil { + return &Poller[T]{ + op: options.Handler, + resp: resp, + result: result, + }, nil + } + + defer resp.Body.Close() + // this is a back-stop in case the swagger is incorrect (i.e. missing one or more status codes for success). + // ideally the codegen should return an error if the initial response failed and not even create a poller. + if !pollers.StatusCodeValid(resp) { + return nil, errors.New("the operation failed or was cancelled") + } + + // determine the polling method + var opr PollingHandler[T] + var err error + if async.Applicable(resp) { + // async poller must be checked first as it can also have a location header + opr, err = async.New[T](pl, resp, options.FinalStateVia) + } else if op.Applicable(resp) { + // op poller must be checked before loc as it can also have a location header + opr, err = op.New[T](pl, resp, options.FinalStateVia) + } else if loc.Applicable(resp) { + opr, err = loc.New[T](pl, resp) + } else if body.Applicable(resp) { + // must test body poller last as it's a subset of the other pollers. 
+ // TODO: this is ambiguous for PATCH/PUT if it returns a 200 with no polling headers (sync completion) + opr, err = body.New[T](pl, resp) + } else if m := resp.Request.Method; resp.StatusCode == http.StatusAccepted && (m == http.MethodDelete || m == http.MethodPost) { + // if we get here it means we have a 202 with no polling headers. + // for DELETE and POST this is a hard error per ARM RPC spec. + return nil, errors.New("response is missing polling URL") + } else { + opr, err = pollers.NewNopPoller[T](resp) + } + + if err != nil { + return nil, err + } + return &Poller[T]{ + op: opr, + resp: resp, + result: result, + }, nil +} + +// NewPollerFromResumeTokenOptions contains the optional parameters for NewPollerFromResumeToken. +type NewPollerFromResumeTokenOptions[T any] struct { + // Response contains a preconstructed response type. + // The final payload will be unmarshaled into it and returned. + Response *T + + // Handler[T] contains a custom polling implementation. + Handler PollingHandler[T] +} + +// NewPollerFromResumeToken creates a Poller from a resume token string. +func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options *NewPollerFromResumeTokenOptions[T]) (*Poller[T], error) { + if options == nil { + options = &NewPollerFromResumeTokenOptions[T]{} + } + result := options.Response + if result == nil { + result = new(T) + } + + if err := pollers.IsTokenValid[T](token); err != nil { + return nil, err + } + raw, err := pollers.ExtractToken(token) + if err != nil { + return nil, err + } + var asJSON map[string]interface{} + if err := json.Unmarshal(raw, &asJSON); err != nil { + return nil, err + } + + opr := options.Handler + // now rehydrate the poller based on the encoded poller type + if async.CanResume(asJSON) { + opr, _ = async.New[T](pl, nil, "") + } else if body.CanResume(asJSON) { + opr, _ = body.New[T](pl, nil) + } else if loc.CanResume(asJSON) { + opr, _ = loc.New[T](pl, nil) + } else if op.CanResume(asJSON) { + opr, _ = op.New[T](pl, nil, "") + } else if opr != nil { + log.Writef(log.EventLRO, "Resuming custom poller %T.", opr) + } else { + return nil, fmt.Errorf("unhandled poller token %s", string(raw)) + } + if err := json.Unmarshal(raw, &opr); err != nil { + return nil, err + } + return &Poller[T]{ + op: opr, + result: result, + }, nil +} + +// PollingHandler[T] abstracts the differences among poller implementations. +type PollingHandler[T any] interface { + // Done returns true if the LRO has reached a terminal state. + Done() bool + + // Poll fetches the latest state of the LRO. + Poll(context.Context) (*http.Response, error) + + // Result is called once the LRO has reached a terminal state. It populates the out parameter + // with the result of the operation. + Result(ctx context.Context, out *T) error +} + +// Poller encapsulates a long-running operation, providing polling facilities until the operation reaches a terminal state. +type Poller[T any] struct { + op PollingHandler[T] + resp *http.Response + err error + result *T + done bool +} + +// PollUntilDoneOptions contains the optional values for the Poller[T].PollUntilDone() method. +type PollUntilDoneOptions struct { + // Frequency is the time to wait between polling intervals in absence of a Retry-After header. Allowed minimum is one second. + // Pass zero to accept the default value (30s). + Frequency time.Duration +} + +// PollUntilDone will poll the service endpoint until a terminal state is reached, an error is received, or the context expires. 
+// It internally uses Poll(), Done(), and Result() in its polling loop, sleeping for the specified duration between intervals. +// options: pass nil to accept the default values. +// NOTE: the default polling frequency is 30 seconds which works well for most operations. However, some operations might +// benefit from a shorter or longer duration. +func (p *Poller[T]) PollUntilDone(ctx context.Context, options *PollUntilDoneOptions) (T, error) { + if options == nil { + options = &PollUntilDoneOptions{} + } + cp := *options + if cp.Frequency == 0 { + cp.Frequency = 30 * time.Second + } + + if cp.Frequency < time.Second { + return *new(T), errors.New("polling frequency minimum is one second") + } + + start := time.Now() + logPollUntilDoneExit := func(v interface{}) { + log.Writef(log.EventLRO, "END PollUntilDone() for %T: %v, total time: %s", p.op, v, time.Since(start)) + } + log.Writef(log.EventLRO, "BEGIN PollUntilDone() for %T", p.op) + if p.resp != nil { + // initial check for a retry-after header existing on the initial response + if retryAfter := shared.RetryAfter(p.resp); retryAfter > 0 { + log.Writef(log.EventLRO, "initial Retry-After delay for %s", retryAfter.String()) + if err := shared.Delay(ctx, retryAfter); err != nil { + logPollUntilDoneExit(err) + return *new(T), err + } + } + } + // begin polling the endpoint until a terminal state is reached + for { + resp, err := p.Poll(ctx) + if err != nil { + logPollUntilDoneExit(err) + return *new(T), err + } + if p.Done() { + logPollUntilDoneExit("succeeded") + return p.Result(ctx) + } + d := cp.Frequency + if retryAfter := shared.RetryAfter(resp); retryAfter > 0 { + log.Writef(log.EventLRO, "Retry-After delay for %s", retryAfter.String()) + d = retryAfter + } else { + log.Writef(log.EventLRO, "delay for %s", d.String()) + } + if err = shared.Delay(ctx, d); err != nil { + logPollUntilDoneExit(err) + return *new(T), err + } + } +} + +// Poll fetches the latest state of the LRO. It returns an HTTP response or error. +// If Poll succeeds, the poller's state is updated and the HTTP response is returned. +// If Poll fails, the poller's state is unmodified and the error is returned. +// Calling Poll on an LRO that has reached a terminal state will return the last HTTP response. +func (p *Poller[T]) Poll(ctx context.Context) (*http.Response, error) { + if p.Done() { + // the LRO has reached a terminal state, don't poll again + return p.resp, nil + } + resp, err := p.op.Poll(ctx) + if err != nil { + return nil, err + } + p.resp = resp + return p.resp, nil +} + +// Done returns true if the LRO has reached a terminal state. +// Once a terminal state is reached, call Result(). +func (p *Poller[T]) Done() bool { + return p.op.Done() +} + +// Result returns the result of the LRO and is meant to be used in conjunction with Poll and Done. +// If the LRO completed successfully, a populated instance of T is returned. +// If the LRO failed or was canceled, an *azcore.ResponseError error is returned. +// Calling this on an LRO in a non-terminal state will return an error. +func (p *Poller[T]) Result(ctx context.Context) (T, error) { + if !p.Done() { + return *new(T), errors.New("poller is in a non-terminal state") + } + if p.done { + // the result has already been retrieved, return the cached value + if p.err != nil { + return *new(T), p.err + } + return *p.result, nil + } + err := p.op.Result(ctx, p.result) + var respErr *exported.ResponseError + if errors.As(err, &respErr) { + // the LRO failed. 
record the error + p.err = err + } else if err != nil { + // the call to Result failed, don't cache anything in this case + return *new(T), err + } + p.done = true + if p.err != nil { + return *new(T), p.err + } + return *p.result, nil +} + +// ResumeToken returns a value representing the poller that can be used to resume +// the LRO at a later time. ResumeTokens are unique per service operation. +// The token's format should be considered opaque and is subject to change. +// Calling this on an LRO in a terminal state will return an error. +func (p *Poller[T]) ResumeToken() (string, error) { + if p.Done() { + return "", errors.New("poller is in a terminal state") + } + tk, err := pollers.NewResumeToken[T](p.op) + if err != nil { + return "", err + } + return tk, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go new file mode 100644 index 000000000..21e5c578d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go @@ -0,0 +1,225 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "mime/multipart" + "reflect" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// Base64Encoding is usesd to specify which base-64 encoder/decoder to use when +// encoding/decoding a slice of bytes to/from a string. +type Base64Encoding int + +const ( + // Base64StdFormat uses base64.StdEncoding for encoding and decoding payloads. + Base64StdFormat Base64Encoding = 0 + + // Base64URLFormat uses base64.RawURLEncoding for encoding and decoding payloads. + Base64URLFormat Base64Encoding = 1 +) + +// NewRequest creates a new policy.Request with the specified input. +func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*policy.Request, error) { + return exported.NewRequest(ctx, httpMethod, endpoint) +} + +// JoinPaths concatenates multiple URL path segments into one path, +// inserting path separation characters as required. JoinPaths will preserve +// query parameters in the root path +func JoinPaths(root string, paths ...string) string { + if len(paths) == 0 { + return root + } + + qps := "" + if strings.Contains(root, "?") { + splitPath := strings.Split(root, "?") + root, qps = splitPath[0], splitPath[1] + } + + for i := 0; i < len(paths); i++ { + root = strings.TrimRight(root, "/") + paths[i] = strings.TrimLeft(paths[i], "/") + root += "/" + paths[i] + } + + if qps != "" { + if !strings.HasSuffix(root, "/") { + root += "/" + } + return root + "?" + qps + } + return root +} + +// EncodeByteArray will base-64 encode the byte slice v. +func EncodeByteArray(v []byte, format Base64Encoding) string { + if format == Base64URLFormat { + return base64.RawURLEncoding.EncodeToString(v) + } + return base64.StdEncoding.EncodeToString(v) +} + +// MarshalAsByteArray will base-64 encode the byte slice v, then calls SetBody. +// The encoded value is treated as a JSON string. 
+func MarshalAsByteArray(req *policy.Request, v []byte, format Base64Encoding) error { + // send as a JSON string + encode := fmt.Sprintf("\"%s\"", EncodeByteArray(v, format)) + return req.SetBody(exported.NopCloser(strings.NewReader(encode)), shared.ContentTypeAppJSON) +} + +// MarshalAsJSON calls json.Marshal() to get the JSON encoding of v then calls SetBody. +func MarshalAsJSON(req *policy.Request, v interface{}) error { + v = cloneWithoutReadOnlyFields(v) + b, err := json.Marshal(v) + if err != nil { + return fmt.Errorf("error marshalling type %T: %s", v, err) + } + return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppJSON) +} + +// MarshalAsXML calls xml.Marshal() to get the XML encoding of v then calls SetBody. +func MarshalAsXML(req *policy.Request, v interface{}) error { + b, err := xml.Marshal(v) + if err != nil { + return fmt.Errorf("error marshalling type %T: %s", v, err) + } + // inclue the XML header as some services require it + b = []byte(xml.Header + string(b)) + return req.SetBody(exported.NopCloser(bytes.NewReader(b)), shared.ContentTypeAppXML) +} + +// SetMultipartFormData writes the specified keys/values as multi-part form +// fields with the specified value. File content must be specified as a ReadSeekCloser. +// All other values are treated as string values. +func SetMultipartFormData(req *policy.Request, formData map[string]interface{}) error { + body := bytes.Buffer{} + writer := multipart.NewWriter(&body) + for k, v := range formData { + if rsc, ok := v.(io.ReadSeekCloser); ok { + // this is the body to upload, the key is its file name + fd, err := writer.CreateFormFile(k, k) + if err != nil { + return err + } + // copy the data to the form file + if _, err = io.Copy(fd, rsc); err != nil { + return err + } + continue + } + // ensure the value is in string format + s, ok := v.(string) + if !ok { + s = fmt.Sprintf("%v", v) + } + if err := writer.WriteField(k, s); err != nil { + return err + } + } + if err := writer.Close(); err != nil { + return err + } + return req.SetBody(exported.NopCloser(bytes.NewReader(body.Bytes())), writer.FormDataContentType()) +} + +// SkipBodyDownload will disable automatic downloading of the response body. +func SkipBodyDownload(req *policy.Request) { + req.SetOperationValue(bodyDownloadPolicyOpValues{Skip: true}) +} + +// returns a clone of the object graph pointed to by v, omitting values of all read-only +// fields. if there are no read-only fields in the object graph, no clone is created. +func cloneWithoutReadOnlyFields(v interface{}) interface{} { + val := reflect.Indirect(reflect.ValueOf(v)) + if val.Kind() != reflect.Struct { + // not a struct, skip + return v + } + // first walk the graph to find any R/O fields. + // if there aren't any, skip cloning the graph. + if !recursiveFindReadOnlyField(val) { + return v + } + return recursiveCloneWithoutReadOnlyFields(val) +} + +// returns true if any field in the object graph of val contains the `azure:"ro"` tag value +func recursiveFindReadOnlyField(val reflect.Value) bool { + t := val.Type() + // iterate over the fields, looking for the "azure" tag. + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + aztag := field.Tag.Get("azure") + if azureTagIsReadOnly(aztag) { + return true + } else if reflect.Indirect(val.Field(i)).Kind() == reflect.Struct && recursiveFindReadOnlyField(reflect.Indirect(val.Field(i))) { + return true + } + } + return false +} + +// clones the object graph of val. 
all non-R/O properties are copied to the clone +func recursiveCloneWithoutReadOnlyFields(val reflect.Value) interface{} { + t := val.Type() + clone := reflect.New(t) + // iterate over the fields, looking for the "azure" tag. + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + aztag := field.Tag.Get("azure") + if azureTagIsReadOnly(aztag) { + // omit from payload + continue + } + // clone field will receive the same value as the source field... + value := val.Field(i) + v := reflect.Indirect(value) + if v.IsValid() && v.Type() != reflect.TypeOf(time.Time{}) && v.Kind() == reflect.Struct { + // ...unless the source value is a struct, in which case we recurse to clone that struct. + // (We can't recursively clone time.Time because it contains unexported fields.) + c := recursiveCloneWithoutReadOnlyFields(v) + if field.Anonymous { + // NOTE: this does not handle the case of embedded fields of unexported struct types. + // this should be ok as we don't generate any code like this at present + value = reflect.Indirect(reflect.ValueOf(c)) + } else { + value = reflect.ValueOf(c) + } + } + reflect.Indirect(clone).Field(i).Set(value) + } + return clone.Interface() +} + +// returns true if the "azure" tag contains the option "ro" +func azureTagIsReadOnly(tag string) bool { + if tag == "" { + return false + } + parts := strings.Split(tag, ",") + for _, part := range parts { + if part == "ro" { + return true + } + } + return false +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go new file mode 100644 index 000000000..2322f0a20 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/response.go @@ -0,0 +1,137 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" +) + +// Payload reads and returns the response body or an error. +// On a successful read, the response body is cached. +// Subsequent reads will access the cached value. +func Payload(resp *http.Response) ([]byte, error) { + return exported.Payload(resp) +} + +// HasStatusCode returns true if the Response's status code is one of the specified values. +func HasStatusCode(resp *http.Response, statusCodes ...int) bool { + return exported.HasStatusCode(resp, statusCodes...) +} + +// UnmarshalAsByteArray will base-64 decode the received payload and place the result into the value pointed to by v. +func UnmarshalAsByteArray(resp *http.Response, v *[]byte, format Base64Encoding) error { + p, err := Payload(resp) + if err != nil { + return err + } + return DecodeByteArray(string(p), v, format) +} + +// UnmarshalAsJSON calls json.Unmarshal() to unmarshal the received payload into the value pointed to by v. 
+func UnmarshalAsJSON(resp *http.Response, v interface{}) error { + payload, err := Payload(resp) + if err != nil { + return err + } + // TODO: verify early exit is correct + if len(payload) == 0 { + return nil + } + err = removeBOM(resp) + if err != nil { + return err + } + err = json.Unmarshal(payload, v) + if err != nil { + err = fmt.Errorf("unmarshalling type %T: %s", v, err) + } + return err +} + +// UnmarshalAsXML calls xml.Unmarshal() to unmarshal the received payload into the value pointed to by v. +func UnmarshalAsXML(resp *http.Response, v interface{}) error { + payload, err := Payload(resp) + if err != nil { + return err + } + // TODO: verify early exit is correct + if len(payload) == 0 { + return nil + } + err = removeBOM(resp) + if err != nil { + return err + } + err = xml.Unmarshal(payload, v) + if err != nil { + err = fmt.Errorf("unmarshalling type %T: %s", v, err) + } + return err +} + +// Drain reads the response body to completion then closes it. The bytes read are discarded. +func Drain(resp *http.Response) { + if resp != nil && resp.Body != nil { + _, _ = io.Copy(ioutil.Discard, resp.Body) + resp.Body.Close() + } +} + +// removeBOM removes any byte-order mark prefix from the payload if present. +func removeBOM(resp *http.Response) error { + payload, err := Payload(resp) + if err != nil { + return err + } + // UTF8 + trimmed := bytes.TrimPrefix(payload, []byte("\xef\xbb\xbf")) + if len(trimmed) < len(payload) { + resp.Body.(shared.BytesSetter).Set(trimmed) + } + return nil +} + +// DecodeByteArray will base-64 decode the provided string into v. +func DecodeByteArray(s string, v *[]byte, format Base64Encoding) error { + if len(s) == 0 { + return nil + } + payload := string(s) + if payload[0] == '"' { + // remove surrounding quotes + payload = payload[1 : len(payload)-1] + } + switch format { + case Base64StdFormat: + decoded, err := base64.StdEncoding.DecodeString(payload) + if err == nil { + *v = decoded + return nil + } + return err + case Base64URLFormat: + // use raw encoding as URL format should not contain any '=' characters + decoded, err := base64.RawURLEncoding.DecodeString(payload) + if err == nil { + *v = decoded + return nil + } + return err + default: + return fmt.Errorf("unrecognized byte array format: %d", format) + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go new file mode 100644 index 000000000..869bed511 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go @@ -0,0 +1,37 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import ( + "crypto/tls" + "net" + "net/http" + "time" +) + +var defaultHTTPClient *http.Client + +func init() { + defaultTransport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + TLSClientConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + }, + } + defaultHTTPClient = &http.Client{ + Transport: defaultTransport, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go new file mode 100644 index 000000000..cadaef3d5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/doc.go @@ -0,0 +1,9 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package streaming contains helpers for streaming IO operations and progress reporting. +package streaming diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go new file mode 100644 index 000000000..8563375af --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming/progress.go @@ -0,0 +1,72 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package streaming + +import ( + "io" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" +) + +type progress struct { + rc io.ReadCloser + rsc io.ReadSeekCloser + pr func(bytesTransferred int64) + offset int64 +} + +// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. +func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { + return exported.NopCloser(rs) +} + +// NewRequestProgress adds progress reporting to an HTTP request's body stream. +func NewRequestProgress(body io.ReadSeekCloser, pr func(bytesTransferred int64)) io.ReadSeekCloser { + return &progress{ + rc: body, + rsc: body, + pr: pr, + offset: 0, + } +} + +// NewResponseProgress adds progress reporting to an HTTP response's body stream. +func NewResponseProgress(body io.ReadCloser, pr func(bytesTransferred int64)) io.ReadCloser { + return &progress{ + rc: body, + rsc: nil, + pr: pr, + offset: 0, + } +} + +// Read reads a block of data from an inner stream and reports progress +func (p *progress) Read(b []byte) (n int, err error) { + n, err = p.rc.Read(b) + if err != nil && err != io.EOF { + return + } + p.offset += int64(n) + // Invokes the user's callback method to report progress + p.pr(p.offset) + return +} + +// Seek only expects a zero or from beginning. +func (p *progress) Seek(offset int64, whence int) (int64, error) { + // This should only ever be called with offset = 0 and whence = io.SeekStart + n, err := p.rsc.Seek(offset, whence) + if err == nil { + p.offset = int64(n) + } + return n, err +} + +// requestBodyProgress supports Close but the underlying stream may not; if it does, Close will close it. 
+func (p *progress) Close() error { + return p.rc.Close() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go new file mode 100644 index 000000000..faa98c9dc --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/doc.go @@ -0,0 +1,9 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +// Package to contains various type-conversion helper functions. +package to diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go new file mode 100644 index 000000000..e0e4817b9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/to/to.go @@ -0,0 +1,21 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package to + +// Ptr returns a pointer to the provided value. +func Ptr[T any](v T) *T { + return &v +} + +// SliceOfPtrs returns a slice of *T from the specified values. +func SliceOfPtrs[T any](vv ...T) []*T { + slc := make([]*T, len(vv)) + for i := range vv { + slc[i] = Ptr(vv[i]) + } + return slc +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt new file mode 100644 index 000000000..48ea6616b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go new file mode 100644 index 000000000..1fdc53615 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/diag.go @@ -0,0 +1,51 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package diag + +import ( + "fmt" + "runtime" + "strings" +) + +// Caller returns the file and line number of a frame on the caller's stack. +// If the funtion fails an empty string is returned. +// skipFrames - the number of frames to skip when determining the caller. +// Passing a value of 0 will return the immediate caller of this function. 
+func Caller(skipFrames int) string { + if pc, file, line, ok := runtime.Caller(skipFrames + 1); ok { + // the skipFrames + 1 is to skip ourselves + frame := runtime.FuncForPC(pc) + return fmt.Sprintf("%s()\n\t%s:%d", frame.Name(), file, line) + } + return "" +} + +// StackTrace returns a formatted stack trace string. +// If the funtion fails an empty string is returned. +// skipFrames - the number of stack frames to skip before composing the trace string. +// totalFrames - the maximum number of stack frames to include in the trace string. +func StackTrace(skipFrames, totalFrames int) string { + pcCallers := make([]uintptr, totalFrames) + if frames := runtime.Callers(skipFrames, pcCallers); frames == 0 { + return "" + } + frames := runtime.CallersFrames(pcCallers) + sb := strings.Builder{} + for { + frame, more := frames.Next() + sb.WriteString(frame.Function) + sb.WriteString("()\n\t") + sb.WriteString(frame.File) + sb.WriteRune(':') + sb.WriteString(fmt.Sprintf("%d\n", frame.Line)) + if !more { + break + } + } + return sb.String() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go new file mode 100644 index 000000000..66bf13e5f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/diag/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package diag diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go new file mode 100644 index 000000000..8c6eacb61 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package errorinfo diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go new file mode 100644 index 000000000..ade7b348e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go @@ -0,0 +1,16 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package errorinfo + +// NonRetriable represents a non-transient error. This works in +// conjunction with the retry policy, indicating that the error condition +// is idempotent, so no retries will be attempted. +// Use errors.As() to access this interface in the error chain. +type NonRetriable interface { + error + NonRetriable() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go new file mode 100644 index 000000000..d7876d297 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package log diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go new file mode 100644 index 000000000..4f1dcf1b7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go @@ -0,0 +1,104 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package log + +import ( + "fmt" + "os" + "time" +) + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// NOTE: The following are exported as public surface area from azcore. DO NOT MODIFY +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// Event is used to group entries. Each group can be toggled on or off. +type Event string + +// SetEvents is used to control which events are written to +// the log. By default all log events are writen. +func SetEvents(cls ...Event) { + log.cls = cls +} + +// SetListener will set the Logger to write to the specified listener. +func SetListener(lst func(Event, string)) { + log.lst = lst +} + +/////////////////////////////////////////////////////////////////////////////////////////////////// +// END PUBLIC SURFACE AREA +/////////////////////////////////////////////////////////////////////////////////////////////////// + +// Should returns true if the specified log event should be written to the log. +// By default all log events will be logged. Call SetEvents() to limit +// the log events for logging. +// If no listener has been set this will return false. +// Calling this method is useful when the message to log is computationally expensive +// and you want to avoid the overhead if its log event is not enabled. +func Should(cls Event) bool { + if log.lst == nil { + return false + } + if log.cls == nil || len(log.cls) == 0 { + return true + } + for _, c := range log.cls { + if c == cls { + return true + } + } + return false +} + +// Write invokes the underlying listener with the specified event and message. +// If the event shouldn't be logged or there is no listener then Write does nothing. +func Write(cls Event, message string) { + if !Should(cls) { + return + } + log.lst(cls, message) +} + +// Writef invokes the underlying listener with the specified event and formatted message. +// If the event shouldn't be logged or there is no listener then Writef does nothing. +func Writef(cls Event, format string, a ...interface{}) { + if !Should(cls) { + return + } + log.lst(cls, fmt.Sprintf(format, a...)) +} + +// TestResetEvents is used for TESTING PURPOSES ONLY. +func TestResetEvents() { + log.cls = nil +} + +// logger controls which events to log and writing to the underlying log. 
+type logger struct { + cls []Event + lst func(Event, string) +} + +// the process-wide logger +var log logger + +func init() { + initLogging() +} + +// split out for testing purposes +func initLogging() { + if cls := os.Getenv("AZURE_SDK_GO_LOGGING"); cls == "all" { + // cls could be enhanced to support a comma-delimited list of log events + log.lst = func(cls Event, msg string) { + // simple console logger, it writes to stderr in the following format: + // [time-stamp] Event: message + fmt.Fprintf(os.Stderr, "[%s] %s: %s\n", time.Now().Format(time.StampMicro), cls, msg) + } + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go new file mode 100644 index 000000000..b23f3860c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go @@ -0,0 +1,120 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package temporal + +import ( + "sync" + "time" +) + +// AcquireResource abstracts a method for refreshing a temporal resource. +type AcquireResource[TResource, TState any] func(state TState) (newResource TResource, newExpiration time.Time, err error) + +// Resource is a temporal resource (usually a credential) that requires periodic refreshing. +type Resource[TResource, TState any] struct { + // cond is used to synchronize access to the shared resource embodied by the remaining fields + cond *sync.Cond + + // acquiring indicates that some thread/goroutine is in the process of acquiring/updating the resource + acquiring bool + + // resource contains the value of the shared resource + resource TResource + + // expiration indicates when the shared resource expires; it is 0 if the resource was never acquired + expiration time.Time + + // lastAttempt indicates when a thread/goroutine last attempted to acquire/update the resource + lastAttempt time.Time + + // acquireResource is the callback function that actually acquires the resource + acquireResource AcquireResource[TResource, TState] +} + +// NewResource creates a new Resource that uses the specified AcquireResource for refreshing. +func NewResource[TResource, TState any](ar AcquireResource[TResource, TState]) *Resource[TResource, TState] { + return &Resource[TResource, TState]{cond: sync.NewCond(&sync.Mutex{}), acquireResource: ar} +} + +// Get returns the underlying resource. +// If the resource is fresh, no refresh is performed. +func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) { + // If the resource is expiring within this time window, update it eagerly. + // This allows other threads/goroutines to keep running by using the not-yet-expired + // resource value while one thread/goroutine updates the resource. 
+ const window = 5 * time.Minute // This example updates the resource 5 minutes prior to expiration + const backoff = 30 * time.Second // Minimum wait time between eager update attempts + + now, acquire, expired, resource := time.Now(), false, false, er.resource + // acquire exclusive lock + er.cond.L.Lock() + for { + expired = er.expiration.IsZero() || er.expiration.Before(now) + if expired { + // The resource was never acquired or has expired + if !er.acquiring { + // If another thread/goroutine is not acquiring/updating the resource, this thread/goroutine will do it + er.acquiring, acquire = true, true + break + } + // Getting here means that this thread/goroutine will wait for the updated resource + } else if er.expiration.Add(-window).Before(now) { + // The resource is valid but is expiring within the time window + if !er.acquiring && er.lastAttempt.Add(backoff).Before(now) { + // If another thread/goroutine is not acquiring/renewing the resource, and none has attempted + // to do so within the last 30 seconds, this thread/goroutine will do it + er.acquiring, acquire = true, true + break + } + // This thread/goroutine will use the existing resource value while another updates it + resource = er.resource + break + } else { + // The resource is not close to expiring, this thread/goroutine should use its current value + resource = er.resource + break + } + // If we get here, wait for the new resource value to be acquired/updated + er.cond.Wait() + } + er.cond.L.Unlock() // Release the lock so no threads/goroutines are blocked + + var err error + if acquire { + // This thread/goroutine has been selected to acquire/update the resource + var expiration time.Time + var newValue TResource + er.lastAttempt = now + newValue, expiration, err = er.acquireResource(state) + + // Atomically, update the shared resource's new value & expiration. + er.cond.L.Lock() + if err == nil { + // Update resource & expiration, return the new value + resource = newValue + er.resource, er.expiration = resource, expiration + } else if !expired { + // An eager update failed. Discard the error and return the current--still valid--resource value + err = nil + } + er.acquiring = false // Indicate that no thread/goroutine is currently acquiring the resource + + // Wake up any waiting threads/goroutines since there is a resource they can ALL use + er.cond.L.Unlock() + er.cond.Broadcast() + } + return resource, err // Return the resource this thread/goroutine can use +} + +// Expire marks the resource as expired, ensuring it's refreshed on the next call to Get(). +func (er *Resource[TResource, TState]) Expire() { + er.cond.L.Lock() + defer er.cond.L.Unlock() + + // Reset the expiration as if we never got this resource to begin with + er.expiration = time.Time{} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go new file mode 100644 index 000000000..a3824bee8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/doc.go @@ -0,0 +1,7 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package uuid diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go new file mode 100644 index 000000000..278ac9cd1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/uuid/uuid.go @@ -0,0 +1,76 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package uuid + +import ( + "crypto/rand" + "errors" + "fmt" + "strconv" +) + +// The UUID reserved variants. +const ( + reservedRFC4122 byte = 0x40 +) + +// A UUID representation compliant with specification in RFC4122 document. +type UUID [16]byte + +// New returns a new UUID using the RFC4122 algorithm. +func New() (UUID, error) { + u := UUID{} + // Set all bits to pseudo-random values. + // NOTE: this takes a process-wide lock + _, err := rand.Read(u[:]) + if err != nil { + return u, err + } + u[8] = (u[8] | reservedRFC4122) & 0x7F // u.setVariant(ReservedRFC4122) + + var version byte = 4 + u[6] = (u[6] & 0xF) | (version << 4) // u.setVersion(4) + return u, nil +} + +// String returns the UUID in "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" format. +func (u UUID) String() string { + return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} + +// Parse parses a string formatted as "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" +// or "{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}" into a UUID. +func Parse(s string) (UUID, error) { + var uuid UUID + // ensure format + switch len(s) { + case 36: + // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + case 38: + // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} + s = s[1:37] + default: + return uuid, errors.New("invalid UUID format") + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return uuid, errors.New("invalid UUID format") + } + // parse chunks + for i, x := range [16]int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + b, err := strconv.ParseUint(s[x:x+2], 16, 8) + if err != nil { + return uuid, fmt.Errorf("invalid UUID format: %s", err) + } + uuid[i] = byte(b) + } + return uuid, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md new file mode 100644 index 000000000..db095b3a2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md @@ -0,0 +1,54 @@ +# Release History + +## 0.4.1 (2022-05-12) + +### Other Changes +* Updated to latest `azcore` and `internal` modules + +## 0.4.0 (2022-04-19) + +### Breaking Changes +* Fixed Issue #17150 : Renaming/refactoring high level methods. +* Fixed Issue #16972 : Constructors should return clients by reference. +* Renaming the options bags to match the naming convention same as that of response. The behaviour of options bags remains the same. + +### Bugs Fixed +* Fixed Issue #17515 : SetTags options bag missing leaseID. +* Fixed Issue #17423 : Drop "Type" suffix from `GeoReplicationStatusType`. +* Fixed Issue #17335 : Nil pointer exception when passing nil options bag in `ListBlobsFlat` API call. +* Fixed Issue #17188 : `BlobURLParts` not supporting VersionID +* Fixed Issue #17152 , Issue #17131 , Issue #17061 : `UploadStreamToBlockBlob` / `UploadStreamToBlockBlob` methods ignoring the options bag. +* Fixed Issue #16920 : Fixing error handling example. +* Fixed Issue #16786 : Refactoring of autorest code generation definition and adding necessary transformations. 
+* Fixed Issue #16679 : Response parsing issue in List blobs API. + +## 0.3.0 (2022-02-09) + +### Breaking Changes + +* Updated to latest `azcore`. Public surface area is unchanged. +* [#16978](https://github.com/Azure/azure-sdk-for-go/pull/16978): The `DownloadResponse.Body` parameter is + now `*RetryReaderOptions`. + +### Bugs Fixed + +* Fixed Issue #16193 : `azblob.GetSASToken` wrong signed resource. +* Fixed Issue #16223 : `HttpRange` does not expose its fields. +* Fixed Issue #16254 : Issue passing reader to upload `BlockBlobClient` +* Fixed Issue #16295 : Problem with listing blobs by using of `ListBlobsHierarchy()` +* Fixed Issue #16542 : Empty `StorageError` in the Azurite environment +* Fixed Issue #16679 : Unable to access Metadata when listing blobs +* Fixed Issue #16816 : `ContainerClient.GetSASToken` doesn't allow list permission. +* Fixed Issue #16988 : Too many arguments in call to `runtime.NewResponseError` + +## 0.2.0 (2021-11-03) + +### Breaking Changes + +* Clients now have one constructor per authentication method + +## 0.1.0 (2021-09-13) + +### Features Added + +* This is the initial preview release of the `azblob` library diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/LICENSE.txt b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/LICENSE.txt new file mode 100644 index 000000000..d1ca00f20 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/LICENSE.txt @@ -0,0 +1,21 @@ + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE \ No newline at end of file diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md new file mode 100644 index 000000000..32a10a005 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md @@ -0,0 +1,397 @@ +# Azure Blob Storage SDK for Go + +## Introduction + +The Microsoft Azure Storage SDK for Go allows you to build applications that takes advantage of Azure's scalable cloud +storage. This is the new beta client module for Azure Blob Storage, which follows +our [Azure SDK Design Guidelines for Go](https://azure.github.io/azure-sdk/golang_introduction.html) and replaces the +previous beta [azblob package](https://github.com/azure/azure-storage-blob-go). + +## Getting Started + +The Azure Blob SDK can access an Azure Storage account. 
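For orientation, here is a minimal end-to-end sketch that condenses the fuller Example section later in this README. It is a sketch only: it assumes the `AZURE_STORAGE_ACCOUNT_NAME`/`AZURE_STORAGE_ACCOUNT_KEY` environment variables described under Prerequisites below, the same `handle(err)` error helper used by the other snippets, a hypothetical container name `mycontainer`, and the usual imports (`context`, `fmt`, `os`, `strings`, `azblob`, and `azcore/streaming` for `NopCloser`).

```go
// Minimal sketch only - see the Example section below for the full walkthrough.
accountName := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME")
accountKey := os.Getenv("AZURE_STORAGE_ACCOUNT_KEY")

// Build a shared-key credential and a service client for the account endpoint.
cred, err := azblob.NewSharedKeyCredential(accountName, accountKey)
handle(err)
service, err := azblob.NewServiceClientWithSharedKey(
	fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil)
handle(err)

ctx := context.Background()

// Create a container, then upload a small block blob into it.
container := service.NewContainerClient("mycontainer") // container name is an example
_, err = container.Create(ctx, nil)
handle(err)
blob := container.NewBlockBlobClient("hello.txt")
_, err = blob.Upload(ctx, streaming.NopCloser(strings.NewReader("Hello world!")), nil)
handle(err)
```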
+ +### Prerequisites + +* Go versions 1.18 or higher +* You must have an [Azure storage account][azure_storage_account]. If you need to create one, you can use + the [Azure Cloud Shell](https://shell.azure.com/bash) to create one with these commands (replace `my-resource-group` + and `mystorageaccount` with your own unique names): + (Optional) if you want a new resource group to hold the Storage Account: + ``` + az group create --name my-resource-group --location westus2 + ``` + Create the storage account: + ``` + az storage account create --resource-group my-resource-group --name mystorageaccount + ``` + + The storage account name can be queried with: + ``` + az storage account show -n mystorageaccount -g my-resource-group --query "primaryEndpoints.blob" + ``` + You can set this as an environment variable with: + ```bash + # PowerShell + $ENV:AZURE_STORAGE_ACCOUNT_NAME="mystorageaccount" + # bash + export AZURE_STORAGE_ACCOUNT_NAME="mystorageaccount" + ``` + + Query your storage account keys: + ``` + az storage account keys list --resource-group my-resource-group -n mystorageaccount + ``` + + Output: + ```json + [ + { + "creationTime": "2022-02-07T17:18:44.088870+00:00", + "keyName": "key1", + "permissions": "FULL", + "value": "..." + }, + { + "creationTime": "2022-02-07T17:18:44.088870+00:00", + "keyName": "key2", + "permissions": "FULL", + "value": "..." + } + ] + ``` + + ```bash + # PowerShell + $ENV:AZURE_STORAGE_ACCOUNT_KEY="" + # Bash + export AZURE_STORAGE_ACCOUNT_KEY="" + ``` + > You can obtain your account key from the Azure Portal under the "Access Keys" section on the left-hand pane of your storage account. + +#### Create account + +* To create a new Storage account, you can use [Azure Portal][azure_portal_create_account] + , [Azure PowerShell][azure_powershell_create_account], or [Azure CLI][azure_cli_create_account]. + +### Install the package + +* Install the Azure Blob Storage client module for Go with `go get`: + +```bash +go get github.com/Azure/azure-sdk-for-go/sdk/storage/azblob +``` + +> Optional: If you are going to use AAD authentication, install the `azidentity` package: + +```bash +go get github.com/Azure/azure-sdk-for-go/sdk/azidentity +``` + +#### Create the client + +`azblob` allows you to interact with three types of resources :- + +* [Azure storage accounts][azure_storage_account]. +* [Containers](https://azure.microsoft.com/en-in/overview/what-is-a-container/#overview) within those storage accounts. +* [Blobs](https://azure.microsoft.com/en-in/services/storage/blobs/#overview) (block blobs/ page blobs/ append blobs) + within those containers. + +Interaction with these resources starts with an instance of a [client](#clients). To create a client object, you will +need the account's blob service endpoint URL and a credential that allows you to access the account. 
The `endpoint` can +be found on the page for your storage account in the [Azure Portal][azure_portal_account_url] under the "Access Keys" +section or by running the following Azure CLI command: + +```bash +# Get the blob service URL for the account +az storage account show -n mystorageaccount -g my-resource-group --query "primaryEndpoints.blob" +``` + +Once you have the account URL, it can be used to create the service client: + +```golang +cred, err := azblob.NewSharedKeyCredential("myAccountName", "myAccountKey") +handle(err) +serviceClient, err := azblob.NewServiceClientWithSharedKey("https://<myAccountName>.blob.core.windows.net/", cred, nil) +handle(err) +``` + +For more information about blob service URL's and how to configure custom domain names for Azure Storage check out +the [official documentation][azure_portal_account_url] + +#### Types of credentials + +The azblob clients support authentication via Shared Key Credential, Connection String, Shared Access Signature, or any +of the `azidentity` types that implement the `azcore.TokenCredential` interface. + +##### 1. Creating the client from a shared key + +To use an account [shared key][azure_shared_key] (aka account key or access key), provide the key as a string. This can +be found in your storage account in the [Azure Portal][azure_portal_account_url] under the "Access Keys" section or by +running the following Azure CLI command: + +```bash +az storage account keys list -g my-resource-group -n mystorageaccount +``` + +Use Shared Key authentication as the credential parameter to authenticate the client: + +```golang +credential, err := azblob.NewSharedKeyCredential("accountName", "accountKey") +handle(err) +serviceClient, err := azblob.NewServiceClientWithSharedKey("https://<myAccountName>.blob.core.windows.net/", credential, nil) +handle(err) +``` + +##### 2. Creating the client from a connection string + +You can use connection string, instead of providing the account URL and credential separately, for authentication as +well. To do this, pass the connection string to the client's `NewServiceClientFromConnectionString` method. The +connection string can be found in your storage account in the [Azure Portal][azure_portal_account_url] under the "Access +Keys" section or with the following Azure CLI command: + +```bash +az storage account show-connection-string -g my-resource-group -n mystorageaccount +``` + +```golang +connStr := "DefaultEndpointsProtocol=https;AccountName=<myAccountName>;AccountKey=<myAccountKey>;EndpointSuffix=core.windows.net" +serviceClient, err := azblob.NewServiceClientFromConnectionString(connStr, nil) +``` + +##### 3. Creating the client from a SAS token + +To use a [shared access signature (SAS) token][azure_sas_token], provide the token as a string. You can generate a SAS +token from the Azure Portal +under [Shared access signature](https://docs.microsoft.com/rest/api/storageservices/create-service-sas) or use +the `ServiceClient.GetSASToken` or `ContainerClient.GetSASToken()` methods. + +```golang +credential, err := azblob.NewSharedKeyCredential("accountName", "accountKey") +handle(err) +serviceClient, err := azblob.NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), credential, nil) +handle(err) +// Provide the convenience function with relevant info (services, resource types, permissions, and duration) +// The SAS token will be valid from this moment onwards.
+accountSAS, err := serviceClient.GetSASToken(AccountSASResourceTypes{Object: true, Service: true, Container: true}, +AccountSASPermissions{Read: true, List: true}, AccountSASServices{Blob: true}, time.Now(), time.Now().Add(48*time.Hour)) +handle(err) +sasURL := fmt.Sprintf("https://%s.blob.core.windows.net/?%s", accountName, accountSAS) + +// The sasURL can be used to authenticate a client without need for a credential +serviceClient, err = NewServiceClientWithNoCredential(sasURL, nil) +handle(err) +``` + +### Clients + +Three different clients are provided to interact with the various components of the Blob Service: + +1. **`ServiceClient`** + * Get and set account settings. + * Query, create, and delete containers within the account. + +2. **`ContainerClient`** + * Get and set container access settings, properties, and metadata. + * Create, delete, and query blobs within the container. + * `ContainerLeaseClient` to support container lease management. + +3. **`BlobClient`** + * `AppendBlobClient`, `BlockBlobClient`, and `PageBlobClient` + * Get and set blob properties. + * Perform CRUD operations on a given blob. + * `BlobLeaseClient` to support blob lease management. + +### Example + +```go +// Use your storage account's name and key to create a credential object, used to access your account. +// You can obtain these details from the Azure Portal. +accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") +if !ok { + handle(errors.New("AZURE_STORAGE_ACCOUNT_NAME could not be found")) +} + +accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") +if !ok { + handle(errors.New("AZURE_STORAGE_ACCOUNT_KEY could not be found")) +} +cred, err := NewSharedKeyCredential(accountName, accountKey) +handle(err) + +// Open up a service client. +// You'll need to specify a service URL, which for blob endpoints usually makes up the syntax http(s)://.blob.core.windows.net/ +service, err := NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil) +handle(err) + +// All operations in the Azure Blob Storage SDK for Go operate on a context.Context, allowing you to control cancellation/timeout. +ctx := context.Background() // This example has no expiry. + +// This example showcases several common operations to help you get started, such as: + +// ===== 1. Creating a container ===== + +// First, branch off of the service client and create a container client. +container := service.NewContainerClient("mycontainer") + +// Then, fire off a create operation on the container client. +// Note that, all service-side requests have an options bag attached, allowing you to specify things like metadata, public access types, etc. +// Specifying nil omits all options. +_, err = container.Create(ctx, nil) +handle(err) + +// ===== 2. Uploading/downloading a block blob ===== +// We'll specify our data up-front, rather than reading a file for simplicity's sake. +data := "Hello world!" + +// Branch off of the container into a block blob client +blockBlob := container.NewBlockBlobClient("HelloWorld.txt") + +// Upload data to the block blob +_, err = blockBlob.Upload(ctx, NopCloser(strings.NewReader(data)), nil) +handle(err) + +// Download the blob's contents and ensure that the download worked properly +get, err := blockBlob.Download(ctx, nil) +handle(err) + +// Open a buffer, reader, and then download! +downloadedData := &bytes.Buffer{} +// RetryReaderOptions has a lot of in-depth tuning abilities, but for the sake of simplicity, we'll omit those here. 
+reader := get.Body(RetryReaderOptions{}) +_, err = downloadedData.ReadFrom(reader) +handle(err) +err = reader.Close() +handle(err) +if data != downloadedData.String() { + handle(errors.New("downloaded data doesn't match uploaded data")) +} + +// ===== 3. list blobs ===== +// The ListBlobs and ListContainers APIs return two channels, a values channel, and an errors channel. +// You should enumerate on a range over the values channel, and then check the errors channel, as only ONE value will ever be passed to the errors channel. +// The AutoPagerTimeout defines how long it will wait to place into the items channel before it exits & cleans itself up. A zero time will result in no timeout. +pager := container.ListBlobsFlat(nil) + +for pager.NextPage(ctx) { + resp := pager.PageResponse() + + for _, v := range resp.ContainerListBlobFlatSegmentResult.Segment.BlobItems { + fmt.Println(*v.Name) + } +} + +if err = pager.Err(); err != nil { + handle(err) +} + +// Delete the blob we created earlier. +_, err = blockBlob.Delete(ctx, nil) +handle(err) + +// Delete the container we created earlier. +_, err = container.Delete(ctx, nil) +handle(err) +``` + +## Troubleshooting + +### Error Handling + +All I/O operations will return an `error` that can be investigated to discover more information about the error. In +addition, you can investigate the raw response of any response object: + +```golang +var storageErr *azblob.StorageError +resp, err := serviceClient.CreateContainer(context.Background(), "testcontainername", nil) +if err != nil && errors.As(err, &storageErr) { + // do something with storageErr.Response() +} +``` + +### Logging + +This module uses the classification based logging implementation in azcore. To turn on logging +set `AZURE_SDK_GO_LOGGING` to `all`. + +If you only want to include logs for `azblob`, you must create your own logger and set the log classification +as `LogCredential`. + +To obtain more detailed logging, including request/response bodies and header values, make sure to leave the logger as +default or enable the `LogRequest` and/or `LogResponse` classificatons. A logger that only includes credential logs can +be like the following: + +```golang +import azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" +// Set log to output to the console +azlog.SetListener(func (cls azlog.Classification, msg string) { + fmt.Println(msg) // printing log out to the console +}) + +// Includes only requests and responses in credential logs +azlog.SetClassifications(azlog.Request, azlog.Response) +``` + +> CAUTION: logs from credentials contain sensitive information. +> These logs must be protected to avoid compromising account security. +> + +## License + +This project is licensed under MIT. + +## Provide Feedback + +If you encounter bugs or have suggestions, please +[open an issue](https://github.com/Azure/azure-sdk-for-go/issues) and assign the `Azure.AzBlob` label. + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License +Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For +details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate +the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to +do this once across all repos using our CLA. 
+ +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or +contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + + + + +[azure_subscription]:https://azure.microsoft.com/free/ + +[azure_storage_account]:https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-portal + +[azure_portal_create_account]:https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-portal + +[azure_powershell_create_account]:https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-powershell + +[azure_cli_create_account]: https://docs.microsoft.com/azure/storage/common/storage-account-create?tabs=azure-cli + +[azure_cli_account_url]:https://docs.microsoft.com/cli/azure/storage/account?view=azure-cli-latest#az-storage-account-show + +[azure_powershell_account_url]:https://docs.microsoft.com/powershell/module/az.storage/get-azstorageaccount?view=azps-4.6.1 + +[azure_portal_account_url]:https://docs.microsoft.com/azure/storage/common/storage-account-overview#storage-account-endpoints + +[azure_sas_token]:https://docs.microsoft.com/azure/storage/common/storage-sas-overview + +[azure_shared_key]:https://docs.microsoft.com/rest/api/storageservices/authorize-with-shared-key + +[azure_core_ref_docs]:https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azcore + +[azure_core_readme]: https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azcore/README.md + +[blobs_error_codes]: https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-error-codes + +[msft_oss_coc]:https://opensource.microsoft.com/codeofconduct/ + +[msft_oss_coc_faq]:https://opensource.microsoft.com/codeofconduct/faq/ + +[contact_msft_oss]:mailto:opencode@microsoft.com + +[blobs_rest]: https://docs.microsoft.com/en-us/rest/api/storageservices/blob-service-rest-api diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/autorest.md new file mode 100644 index 000000000..0a391904a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/autorest.md @@ -0,0 +1,171 @@ +# Code Generation - Azure Blob SDK for Golang + + + +```bash +cd swagger +autorest autorest.md +gofmt -w generated/* +``` + +### Settings + +```yaml +go: true +clear-output-folder: false +version: "^3.0.0" +license-header: MICROSOFT_MIT_NO_VERSION +input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/storage/data-plane/Microsoft.BlobStorage/preview/2020-10-02/blob.json" +module: "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" +credential-scope: "https://storage.azure.com/.default" +output-folder: internal/ +file-prefix: "zz_generated_" +openapi-type: "data-plane" +verbose: true +security: AzureKey +module-version: "0.3.0" +modelerfour: + group-parameters: false + seal-single-value-enum-by-default: true + lenient-model-deduplication: true +export-clients: false +use: "@autorest/go@4.0.0-preview.36" +``` + +### Fix BlobMetadata. + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.BlobMetadata["properties"]; + +``` + +### Don't include container name or blob in path - we have direct URIs. 
+ +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"] + transform: > + for (const property in $) + { + if (property.includes('/{containerName}/{blob}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName") && false == param['$ref'].endsWith("#/parameters/Blob"))}); + } + else if (property.includes('/{containerName}')) + { + $[property]["parameters"] = $[property]["parameters"].filter(function(param) { return (typeof param['$ref'] === "undefined") || (false == param['$ref'].endsWith("#/parameters/ContainerName"))}); + } + } +``` + +### Remove DataLake stuff. + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"] + transform: > + for (const property in $) + { + if (property.includes('filesystem')) + { + delete $[property]; + } + } +``` + +### Remove DataLakeStorageError + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.DataLakeStorageError; +``` + +### Fix 304s + +``` yaml +directive: +- from: swagger-document + where: $["x-ms-paths"]["/{containerName}/{blob}"] + transform: > + $.get.responses["304"] = { + "description": "The condition specified using HTTP conditional header(s) is not met.", + "x-az-response-name": "ConditionNotMetError", + "headers": { "x-ms-error-code": { "x-ms-client-name": "ErrorCode", "type": "string" } } + }; +``` + +### Fix GeoReplication + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.GeoReplication.properties.Status["x-ms-enum"]; + $.GeoReplication.properties.Status["x-ms-enum"] = { + "name": "BlobGeoReplicationStatus", + "modelAsString": false + }; +``` + +### Fix RehydratePriority + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + delete $.RehydratePriority["x-ms-enum"]; + $.RehydratePriority["x-ms-enum"] = { + "name": "RehydratePriority", + "modelAsString": false + }; +``` + +### Fix BlobDeleteType + +``` yaml +directive: +- from: swagger-document + where: $.parameters + transform: > + delete $.BlobDeleteType.enum; + $.BlobDeleteType.enum = [ + "None", + "Permanent" + ]; +``` + +### Fix EncryptionAlgorithm + +``` yaml +directive: +- from: swagger-document + where: $.parameters + transform: > + delete $.EncryptionAlgorithm.enum; + $.EncryptionAlgorithm.enum = [ + "None", + "AES256" + ]; +``` + +### Fix XML string "ObjectReplicationMetadata" to "OrMetadata" + +``` yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + $.BlobItemInternal.properties["OrMetadata"] = $.BlobItemInternal.properties["ObjectReplicationMetadata"]; + delete $.BlobItemInternal.properties["ObjectReplicationMetadata"]; +``` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bytes_writer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bytes_writer.go new file mode 100644 index 000000000..14c7feda1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bytes_writer.go @@ -0,0 +1,30 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azblob + +import ( + "errors" +) + +type bytesWriter []byte + +func newBytesWriter(b []byte) bytesWriter { + return b +} + +func (c bytesWriter) WriteAt(b []byte, off int64) (int, error) { + if off >= int64(len(c)) || off < 0 { + return 0, errors.New("offset value is out of range") + } + + n := copy(c[int(off):], b) + if n < len(b) { + return n, errors.New("not enough space for all bytes") + } + + return n, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/chunkwriting.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/chunkwriting.go new file mode 100644 index 000000000..d5ccdfb40 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/chunkwriting.go @@ -0,0 +1,231 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/binary" + "errors" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal" + "io" + "sync" + "sync/atomic" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" +) + +// blockWriter provides methods to upload blocks that represent a file to a server and commit them. +// This allows us to provide a local implementation that fakes the server for hermetic testing. +type blockWriter interface { + StageBlock(context.Context, string, io.ReadSeekCloser, *BlockBlobStageBlockOptions) (BlockBlobStageBlockResponse, error) + CommitBlockList(context.Context, []string, *BlockBlobCommitBlockListOptions) (BlockBlobCommitBlockListResponse, error) +} + +// copyFromReader copies a source io.Reader to blob storage using concurrent uploads. +// TODO(someone): The existing model provides a buffer size and buffer limit as limiting factors. The buffer size is probably +// useless other than needing to be above some number, as the network stack is going to hack up the buffer over some size. The +// max buffers is providing a cap on how much memory we use (by multiplying it times the buffer size) and how many go routines can upload +// at a time. I think having a single max memory dial would be more efficient. We can choose an internal buffer size that works +// well, 4 MiB or 8 MiB, and auto-scale to as many goroutines within the memory limit. This gives a single dial to tweak and we can +// choose a max value for the memory setting based on internal transfers within Azure (which will give us the maximum throughput model). +// We can even provide a utility to dial this number in for customer networks to optimize their copies. +func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o UploadStreamOptions) (BlockBlobCommitBlockListResponse, error) { + if err := o.defaults(); err != nil { + return BlockBlobCommitBlockListResponse{}, err + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + var err error + generatedUuid, err := uuid.New() + if err != nil { + return BlockBlobCommitBlockListResponse{}, err + } + + cp := &copier{ + ctx: ctx, + cancel: cancel, + reader: from, + to: to, + id: newID(generatedUuid), + o: o, + errCh: make(chan error, 1), + } + + // Send all our chunks until we get an error. + for { + if err = cp.sendChunk(); err != nil { + break + } + } + // If the error is not EOF, then we have a problem. + if err != nil && !errors.Is(err, io.EOF) { + return BlockBlobCommitBlockListResponse{}, err + } + + // Close out our upload. 
+ if err := cp.close(); err != nil { + return BlockBlobCommitBlockListResponse{}, err + } + + return cp.result, nil +} + +// copier streams a file via chunks in parallel from a reader representing a file. +// Do not use directly, instead use copyFromReader(). +type copier struct { + // ctx holds the context of a copier. This is normally a faux pas to store a Context in a struct. In this case, + // the copier has the lifetime of a function call, so it's fine. + ctx context.Context + cancel context.CancelFunc + + // reader is the source to be written to storage. + reader io.Reader + // to is the location we are writing our chunks to. + to blockWriter + + // o contains our options for uploading. + o UploadStreamOptions + + // id provides the ids for each chunk. + id *id + + //// num is the current chunk we are on. + //num int32 + //// ch is used to pass the next chunk of data from our reader to one of the writers. + //ch chan copierChunk + + // errCh is used to hold the first error from our concurrent writers. + errCh chan error + // wg provides a count of how many writers we are waiting to finish. + wg sync.WaitGroup + + // result holds the final result from blob storage after we have submitted all chunks. + result BlockBlobCommitBlockListResponse +} + +type copierChunk struct { + buffer []byte + id string + length int +} + +// getErr returns an error by priority. First, if a function set an error, it returns that error. Next, if the Context has an error +// it returns that error. Otherwise, it is nil. getErr supports only returning an error once per copier. +func (c *copier) getErr() error { + select { + case err := <-c.errCh: + return err + default: + } + return c.ctx.Err() +} + +// sendChunk reads data from out internal reader, creates a chunk, and sends it to be written via a channel. +// sendChunk returns io.EOF when the reader returns an io.EOF or io.ErrUnexpectedEOF. +func (c *copier) sendChunk() error { + if err := c.getErr(); err != nil { + return err + } + + buffer := c.o.TransferManager.Get() + if len(buffer) == 0 { + return fmt.Errorf("TransferManager returned a 0 size buffer, this is a bug in the manager") + } + + n, err := io.ReadFull(c.reader, buffer) + if n > 0 { + // Some data was read, schedule the write. + id := c.id.next() + c.wg.Add(1) + c.o.TransferManager.Run( + func() { + defer c.wg.Done() + c.write(copierChunk{buffer: buffer, id: id, length: n}) + }, + ) + } else { + // Return the unused buffer to the manager. + c.o.TransferManager.Put(buffer) + } + + if err == nil { + return nil + } else if err == io.EOF || err == io.ErrUnexpectedEOF { + return io.EOF + } + + if cerr := c.getErr(); cerr != nil { + return cerr + } + + return err +} + +// write uploads a chunk to blob storage. +func (c *copier) write(chunk copierChunk) { + defer c.o.TransferManager.Put(chunk.buffer) + + if err := c.ctx.Err(); err != nil { + return + } + stageBlockOptions := c.o.getStageBlockOptions() + _, err := c.to.StageBlock(c.ctx, chunk.id, internal.NopCloser(bytes.NewReader(chunk.buffer[:chunk.length])), stageBlockOptions) + if err != nil { + c.errCh <- fmt.Errorf("write error: %w", err) + return + } +} + +// close commits our blocks to blob storage and closes our writer. 
+func (c *copier) close() error { + c.wg.Wait() + + if err := c.getErr(); err != nil { + return err + } + + var err error + commitBlockListOptions := c.o.getCommitBlockListOptions() + c.result, err = c.to.CommitBlockList(c.ctx, c.id.issued(), commitBlockListOptions) + return err +} + +// id allows the creation of unique IDs based on UUID4 + an int32. This auto-increments. +type id struct { + u [64]byte + num uint32 + all []string +} + +// newID constructs a new id. +func newID(uu uuid.UUID) *id { + u := [64]byte{} + copy(u[:], uu[:]) + return &id{u: u} +} + +// next returns the next ID. +func (id *id) next() string { + defer atomic.AddUint32(&id.num, 1) + + binary.BigEndian.PutUint32(id.u[len(uuid.UUID{}):], atomic.LoadUint32(&id.num)) + str := base64.StdEncoding.EncodeToString(id.u[:]) + id.all = append(id.all, str) + + return str +} + +// issued returns all ids that have been issued. This returned value shares the internal slice, so it is not safe to modify the return. +// The value is only valid until the next time next() is called. +func (id *id) issued() []string { + return id.all +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml new file mode 100644 index 000000000..e0623f50e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml @@ -0,0 +1,28 @@ +trigger: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/storage/azblob + +pr: + branches: + include: + - main + - feature/* + - hotfix/* + - release/* + paths: + include: + - sdk/storage/azblob + + +stages: + - template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml + parameters: + ServiceDirectory: 'storage/azblob' + RunLiveTests: true diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/connection.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/connection.go new file mode 100644 index 000000000..c5d501c66 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/connection.go @@ -0,0 +1,39 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +type connection struct { + u string + p runtime.Pipeline +} + +// newConnection creates an instance of the connection type with the specified endpoint. +// Pass nil to accept the default options; this is the same as passing a zero-value options. +func newConnection(endpoint string, options *azcore.ClientOptions) *connection { + cp := azcore.ClientOptions{} + if options != nil { + cp = *options + } + return &connection{u: endpoint, p: runtime.NewPipeline(moduleName, moduleVersion, runtime.PipelineOptions{}, &cp)} +} + +// Endpoint returns the connection's endpoint. +func (c *connection) Endpoint() string { + return c.u +} + +// Pipeline returns the connection's pipeline. 
+func (c *connection) Pipeline() runtime.Pipeline { + return c.p +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go new file mode 100644 index 000000000..c1c336ed4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/constants.go @@ -0,0 +1,46 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +var SASVersion = "2019-12-12" + +//nolint +const ( + // BlockBlobMaxUploadBlobBytes indicates the maximum number of bytes that can be sent in a call to Upload. + BlockBlobMaxUploadBlobBytes = 256 * 1024 * 1024 // 256MB + + // BlockBlobMaxStageBlockBytes indicates the maximum number of bytes that can be sent in a call to StageBlock. + BlockBlobMaxStageBlockBytes = 4000 * 1024 * 1024 // 4GB + + // BlockBlobMaxBlocks indicates the maximum number of blocks allowed in a block blob. + BlockBlobMaxBlocks = 50000 + + // PageBlobPageBytes indicates the number of bytes in a page (512). + PageBlobPageBytes = 512 + + // BlobDefaultDownloadBlockSize is default block size + BlobDefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB +) + +const ( + headerAuthorization = "Authorization" + headerXmsDate = "x-ms-date" + headerContentLength = "Content-Length" + headerContentEncoding = "Content-Encoding" + headerContentLanguage = "Content-Language" + headerContentType = "Content-Type" + headerContentMD5 = "Content-MD5" + headerIfModifiedSince = "If-Modified-Since" + headerIfMatch = "If-Match" + headerIfNoneMatch = "If-None-Match" + headerIfUnmodifiedSince = "If-Unmodified-Since" + headerRange = "Range" +) + +const ( + tokenScope = "https://storage.azure.com/.default" +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go new file mode 100644 index 000000000..c2426eb70 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/doc.go @@ -0,0 +1,214 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright 2017 Microsoft Corporation. All rights reserved. +// Use of this source code is governed by an MIT +// license that can be found in the LICENSE file. + +/* + +Package azblob can access an Azure Blob Storage. + +The azblob package is capable of :- + - Creating, deleting, and querying containers in an account + - Creating, deleting, and querying blobs in a container + - Creating Shared Access Signature for authentication + +Types of Resources + +The azblob package allows you to interact with three types of resources :- + +* Azure storage accounts. +* Containers within those storage accounts. +* Blobs (block blobs/ page blobs/ append blobs) within those containers. + +The Azure Blob Storage (azblob) client library for Go allows you to interact with each of these components through the use of a dedicated client object. +To create a client object, you will need the account's blob service endpoint URL and a credential that allows you to access the account. + +Types of Credentials + +The clients support different forms of authentication. +The azblob library supports any of the `azcore.TokenCredential` interfaces, authorization via a Connection String, +or authorization with a Shared Access Signature token. + +Using a Shared Key + +To use an account shared key (aka account key or access key), provide the key as a string. 
+This can be found in your storage account in the Azure Portal under the "Access Keys" section. + +Use the key as the credential parameter to authenticate the client: + + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + + serviceURL := fmt.Sprintf("https://%s.blob.core.windows.net/", accountName) + + cred, err := azblob.NewSharedKeyCredential(accountName, accountKey) + handle(err) + + serviceClient, err := azblob.NewServiceClientWithSharedKey(serviceURL, cred, nil) + handle(err) + + fmt.Println(serviceClient.URL()) + +Using a Connection String + +Depending on your use case and authorization method, you may prefer to initialize a client instance with a connection string instead of providing the account URL and credential separately. +To do this, pass the connection string to the service client's `NewServiceClientFromConnectionString` method. +The connection string can be found in your storage account in the Azure Portal under the "Access Keys" section. + + connStr := "DefaultEndpointsProtocol=https;AccountName=;AccountKey=;EndpointSuffix=core.windows.net" + serviceClient, err := azblob.NewServiceClientFromConnectionString(connStr, nil) + +Using a Shared Access Signature (SAS) Token + +To use a shared access signature (SAS) token, provide the token at the end of your service URL. +You can generate a SAS token from the Azure Portal under Shared Access Signature or use the ServiceClient.GetSASToken() functions. + + accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + serviceURL := fmt.Sprintf("https://%s.blob.core.windows.net/", accountName) + + cred, err := azblob.NewSharedKeyCredential(accountName, accountKey) + handle(err) + serviceClient, err := azblob.NewServiceClientWithSharedKey(serviceURL, cred, nil) + handle(err) + fmt.Println(serviceClient.URL()) + + // Alternatively, you can create SAS on the fly + + resources := azblob.AccountSASResourceTypes{Service: true} + permission := azblob.AccountSASPermissions{Read: true} + start := time.Now() + expiry := start.AddDate(0, 0, 1) + serviceURLWithSAS, err := serviceClient.GetSASURL(resources, permission, start, expiry) + handle(err) + + serviceClientWithSAS, err := azblob.NewServiceClientWithNoCredential(serviceURLWithSAS, nil) + handle(err) + + fmt.Println(serviceClientWithSAS.URL()) + +Types of Clients + +There are three different clients provided to interact with the various components of the Blob Service: + +1. **`ServiceClient`** + * Get and set account settings. + * Query, create, and delete containers within the account. + +2. **`ContainerClient`** + * Get and set container access settings, properties, and metadata. + * Create, delete, and query blobs within the container. + * `ContainerLeaseClient` to support container lease management. + +3. **`BlobClient`** + * `AppendBlobClient`, `BlockBlobClient`, and `PageBlobClient` + * Get and set blob properties. + * Perform CRUD operations on a given blob. + * `BlobLeaseClient` to support blob lease management. + +Examples + + // Your account name and key can be obtained from the Azure Portal. 
+ accountName, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_NAME") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_NAME could not be found") + } + + accountKey, ok := os.LookupEnv("AZURE_STORAGE_ACCOUNT_KEY") + if !ok { + panic("AZURE_STORAGE_ACCOUNT_KEY could not be found") + } + cred, err := azblob.NewSharedKeyCredential(accountName, accountKey) + handle(err) + + // The service URL for blob endpoints is usually in the form: http(s)://.blob.core.windows.net/ + serviceClient, err := azblob.NewServiceClientWithSharedKey(fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil) + handle(err) + + // ===== 1. Create a container ===== + + // First, create a container client, and use the Create method to create a new container in your account + containerClient, err := serviceClient.NewContainerClient("testcontainer") + handle(err) + + // All APIs have an options' bag struct as a parameter. + // The options' bag struct allows you to specify optional parameters such as metadata, public access types, etc. + // If you want to use the default options, pass in nil. + _, err = containerClient.Create(context.TODO(), nil) + handle(err) + + // ===== 2. Upload and Download a block blob ===== + uploadData := "Hello world!" + + // Create a new blockBlobClient from the containerClient + blockBlobClient, err := containerClient.NewBlockBlobClient("HelloWorld.txt") + handle(err) + + // Upload data to the block blob + blockBlobUploadOptions := azblob.BlockBlobUploadOptions{ + Metadata: map[string]string{"Foo": "Bar"}, + TagsMap: map[string]string{"Year": "2022"}, + } + _, err = blockBlobClient.Upload(context.TODO(), streaming.NopCloser(strings.NewReader(uploadData)), &blockBlobUploadOptions) + handle(err) + + // Download the blob's contents and ensure that the download worked properly + blobDownloadResponse, err := blockBlobClient.Download(context.TODO(), nil) + handle(err) + + // Use the bytes.Buffer object to read the downloaded data. + // RetryReaderOptions has a lot of in-depth tuning abilities, but for the sake of simplicity, we'll omit those here. + reader := blobDownloadResponse.Body(nil) + downloadData, err := ioutil.ReadAll(reader) + handle(err) + if string(downloadData) != uploadData { + handle(errors.New("Uploaded data should be same as downloaded data")) + } + + + if err = reader.Close(); err != nil { + handle(err) + return + } + + // ===== 3. List blobs ===== + // List methods returns a pager object which can be used to iterate over the results of a paging operation. + // To iterate over a page use the NextPage(context.Context) to fetch the next page of results. + // PageResponse() can be used to iterate over the results of the specific page. + // Always check the Err() method after paging to see if an error was returned by the pager. A pager will return either an error or the page of results. + pager := containerClient.ListBlobsFlat(nil) + for pager.NextPage(context.TODO()) { + resp := pager.PageResponse() + for _, v := range resp.Segment.BlobItems { + fmt.Println(*v.Name) + } + } + + if err = pager.Err(); err != nil { + handle(err) + } + + // Delete the blob. + _, err = blockBlobClient.Delete(context.TODO(), nil) + handle(err) + + // Delete the container. 
+ _, err = containerClient.Delete(context.TODO(), nil) + handle(err) +*/ + +package azblob diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/highlevel.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/highlevel.go new file mode 100644 index 000000000..287250039 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/highlevel.go @@ -0,0 +1,316 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "context" + "encoding/base64" + "io" + "net/http" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal" + + "bytes" + "errors" + "os" +) + +// uploadReaderAtToBlockBlob uploads a buffer in blocks to a block blob. +func (bb *BlockBlobClient) uploadReaderAtToBlockBlob(ctx context.Context, reader io.ReaderAt, readerSize int64, o UploadOption) (*http.Response, error) { + if o.BlockSize == 0 { + // If bufferSize > (BlockBlobMaxStageBlockBytes * BlockBlobMaxBlocks), then error + if readerSize > BlockBlobMaxStageBlockBytes*BlockBlobMaxBlocks { + return nil, errors.New("buffer is too large to upload to a block blob") + } + // If bufferSize <= BlockBlobMaxUploadBlobBytes, then Upload should be used with just 1 I/O request + if readerSize <= BlockBlobMaxUploadBlobBytes { + o.BlockSize = BlockBlobMaxUploadBlobBytes // Default if unspecified + } else { + o.BlockSize = readerSize / BlockBlobMaxBlocks // buffer / max blocks = block size to use all 50,000 blocks + if o.BlockSize < BlobDefaultDownloadBlockSize { // If the block size is smaller than 4MB, round up to 4MB + o.BlockSize = BlobDefaultDownloadBlockSize + } + // StageBlock will be called with blockSize blocks and a Parallelism of (BufferSize / BlockSize). + } + } + + if readerSize <= BlockBlobMaxUploadBlobBytes { + // If the size can fit in 1 Upload call, do it this way + var body io.ReadSeeker = io.NewSectionReader(reader, 0, readerSize) + if o.Progress != nil { + body = streaming.NewRequestProgress(internal.NopCloser(body), o.Progress) + } + + uploadBlockBlobOptions := o.getUploadBlockBlobOptions() + resp, err := bb.Upload(ctx, internal.NopCloser(body), uploadBlockBlobOptions) + + return resp.RawResponse, err + } + + var numBlocks = uint16(((readerSize - 1) / o.BlockSize) + 1) + + blockIDList := make([]string, numBlocks) // Base-64 encoded block IDs + progress := int64(0) + progressLock := &sync.Mutex{} + + err := DoBatchTransfer(ctx, BatchTransferOptions{ + OperationName: "uploadReaderAtToBlockBlob", + TransferSize: readerSize, + ChunkSize: o.BlockSize, + Parallelism: o.Parallelism, + Operation: func(offset int64, count int64, ctx context.Context) error { + // This function is called once per block. 
+ // It is passed this block's offset within the buffer and its count of bytes + // Prepare to read the proper block/section of the buffer + var body io.ReadSeeker = io.NewSectionReader(reader, offset, count) + blockNum := offset / o.BlockSize + if o.Progress != nil { + blockProgress := int64(0) + body = streaming.NewRequestProgress(internal.NopCloser(body), + func(bytesTransferred int64) { + diff := bytesTransferred - blockProgress + blockProgress = bytesTransferred + progressLock.Lock() // 1 goroutine at a time gets progress report + progress += diff + o.Progress(progress) + progressLock.Unlock() + }) + } + + // Block IDs are unique values to avoid issue if 2+ clients are uploading blocks + // at the same time causing PutBlockList to get a mix of blocks from all the clients. + generatedUuid, err := uuid.New() + if err != nil { + return err + } + blockIDList[blockNum] = base64.StdEncoding.EncodeToString([]byte(generatedUuid.String())) + stageBlockOptions := o.getStageBlockOptions() + _, err = bb.StageBlock(ctx, blockIDList[blockNum], internal.NopCloser(body), stageBlockOptions) + return err + }, + }) + if err != nil { + return nil, err + } + // All put blocks were successful, call Put Block List to finalize the blob + commitBlockListOptions := o.getCommitBlockListOptions() + resp, err := bb.CommitBlockList(ctx, blockIDList, commitBlockListOptions) + + return resp.RawResponse, err +} + +// UploadBuffer uploads a buffer in blocks to a block blob. +func (bb *BlockBlobClient) UploadBuffer(ctx context.Context, b []byte, o UploadOption) (*http.Response, error) { + return bb.uploadReaderAtToBlockBlob(ctx, bytes.NewReader(b), int64(len(b)), o) +} + +// UploadFile uploads a file in blocks to a block blob. +func (bb *BlockBlobClient) UploadFile(ctx context.Context, file *os.File, o UploadOption) (*http.Response, error) { + + stat, err := file.Stat() + if err != nil { + return nil, err + } + return bb.uploadReaderAtToBlockBlob(ctx, file, stat.Size(), o) +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UploadStream copies the file held in io.Reader to the Blob at blockBlobClient. +// A Context deadline or cancellation will cause this to error. +func (bb *BlockBlobClient) UploadStream(ctx context.Context, body io.Reader, o UploadStreamOptions) (BlockBlobCommitBlockListResponse, error) { + if err := o.defaults(); err != nil { + return BlockBlobCommitBlockListResponse{}, err + } + + // If we used the default manager, we need to close it. + if o.transferMangerNotSet { + defer o.TransferManager.Close() + } + + result, err := copyFromReader(ctx, body, bb, o) + if err != nil { + return BlockBlobCommitBlockListResponse{}, err + } + + return result, nil +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DownloadToWriterAt downloads an Azure blob to a WriterAt with parallel. +// Offset and count are optional, pass 0 for both to download the entire blob. 
+func (b *BlobClient) DownloadToWriterAt(ctx context.Context, offset int64, count int64, writer io.WriterAt, o DownloadOptions) error { + if o.BlockSize == 0 { + o.BlockSize = BlobDefaultDownloadBlockSize + } + + if count == CountToEnd { // If size not specified, calculate it + // If we don't have the length at all, get it + downloadBlobOptions := o.getDownloadBlobOptions(0, CountToEnd, nil) + dr, err := b.Download(ctx, downloadBlobOptions) + if err != nil { + return err + } + count = *dr.ContentLength - offset + } + + if count <= 0 { + // The file is empty, there is nothing to download. + return nil + } + + // Prepare and do parallel download. + progress := int64(0) + progressLock := &sync.Mutex{} + + err := DoBatchTransfer(ctx, BatchTransferOptions{ + OperationName: "downloadBlobToWriterAt", + TransferSize: count, + ChunkSize: o.BlockSize, + Parallelism: o.Parallelism, + Operation: func(chunkStart int64, count int64, ctx context.Context) error { + + downloadBlobOptions := o.getDownloadBlobOptions(chunkStart+offset, count, nil) + dr, err := b.Download(ctx, downloadBlobOptions) + if err != nil { + return err + } + body := dr.Body(&o.RetryReaderOptionsPerBlock) + if o.Progress != nil { + rangeProgress := int64(0) + body = streaming.NewResponseProgress( + body, + func(bytesTransferred int64) { + diff := bytesTransferred - rangeProgress + rangeProgress = bytesTransferred + progressLock.Lock() + progress += diff + o.Progress(progress) + progressLock.Unlock() + }) + } + _, err = io.Copy(newSectionWriter(writer, chunkStart, count), body) + if err != nil { + return err + } + err = body.Close() + return err + }, + }) + if err != nil { + return err + } + return nil +} + +// DownloadToBuffer downloads an Azure blob to a buffer with parallel. +// Offset and count are optional, pass 0 for both to download the entire blob. +func (b *BlobClient) DownloadToBuffer(ctx context.Context, offset int64, count int64, _bytes []byte, o DownloadOptions) error { + return b.DownloadToWriterAt(ctx, offset, count, newBytesWriter(_bytes), o) +} + +// DownloadToFile downloads an Azure blob to a local file. +// The file would be truncated if the size doesn't match. +// Offset and count are optional, pass 0 for both to download the entire blob. +func (b *BlobClient) DownloadToFile(ctx context.Context, offset int64, count int64, file *os.File, o DownloadOptions) error { + // 1. Calculate the size of the destination file + var size int64 + + if count == CountToEnd { + // Try to get Azure blob's size + getBlobPropertiesOptions := o.getBlobPropertiesOptions() + props, err := b.GetProperties(ctx, getBlobPropertiesOptions) + if err != nil { + return err + } + size = *props.ContentLength - offset + } else { + size = count + } + + // 2. Compare and try to resize local file's size if it doesn't match Azure blob's size. + stat, err := file.Stat() + if err != nil { + return err + } + if stat.Size() != size { + if err = file.Truncate(size); err != nil { + return err + } + } + + if size > 0 { + return b.DownloadToWriterAt(ctx, offset, size, file, o) + } else { // if the blob's size is 0, there is no need in downloading it + return nil + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DoBatchTransfer helps to execute operations in a batch manner. 
+// Can be used by users to customize batch works (for other scenarios that the SDK does not provide) +func DoBatchTransfer(ctx context.Context, o BatchTransferOptions) error { + if o.ChunkSize == 0 { + return errors.New("ChunkSize cannot be 0") + } + + if o.Parallelism == 0 { + o.Parallelism = 5 // default Parallelism + } + + // Prepare and do parallel operations. + numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1) + operationChannel := make(chan func() error, o.Parallelism) // Create the channel that release 'Parallelism' goroutines concurrently + operationResponseChannel := make(chan error, numChunks) // Holds each response + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // Create the goroutines that process each operation (in parallel). + for g := uint16(0); g < o.Parallelism; g++ { + //grIndex := g + go func() { + for f := range operationChannel { + err := f() + operationResponseChannel <- err + } + }() + } + + // Add each chunk's operation to the channel. + for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { + curChunkSize := o.ChunkSize + + if chunkNum == numChunks-1 { // Last chunk + curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total + } + offset := int64(chunkNum) * o.ChunkSize + + operationChannel <- func() error { + return o.Operation(offset, curChunkSize, ctx) + } + } + close(operationChannel) + + // Wait for the operations to complete. + var firstErr error = nil + for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { + responseError := <-operationResponseChannel + // record the first error (the original error which should cause the other chunks to fail with canceled context) + if responseError != nil && firstErr == nil { + cancel() // As soon as any operation fails, cancel all remaining operation calls + firstErr = responseError + } + } + return firstErr +} + +// --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/zc_shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/zc_shared.go new file mode 100644 index 000000000..cd2ada9b5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/zc_shared.go @@ -0,0 +1,150 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package internal + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net/http" + "strconv" + "time" +) + +// CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header. +type CtxWithHTTPHeaderKey struct{} + +// CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions. +type CtxWithRetryOptionsKey struct{} + +type nopCloser struct { + io.ReadSeeker +} + +func (n nopCloser) Close() error { + return nil +} + +// NopCloser returns a ReadSeekCloser with a no-op close method wrapping the provided io.ReadSeeker. 
+func NopCloser(rs io.ReadSeeker) io.ReadSeekCloser { + return nopCloser{rs} +} + +// BodyDownloadPolicyOpValues is the struct containing the per-operation values +type BodyDownloadPolicyOpValues struct { + Skip bool +} + +func NewResponseError(inner error, resp *http.Response) error { + return &ResponseError{inner: inner, resp: resp} +} + +type ResponseError struct { + inner error + resp *http.Response +} + +// Error implements the error interface for type ResponseError. +func (e *ResponseError) Error() string { + return e.inner.Error() +} + +// Unwrap returns the inner error. +func (e *ResponseError) Unwrap() error { + return e.inner +} + +// RawResponse returns the HTTP response associated with this error. +func (e *ResponseError) RawResponse() *http.Response { + return e.resp +} + +// NonRetriable indicates this error is non-transient. +func (e *ResponseError) NonRetriable() { + // marker method +} + +// Delay waits for the duration to elapse or the context to be cancelled. +func Delay(ctx context.Context, delay time.Duration) error { + select { + case <-time.After(delay): + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// ErrNoBody is returned if the response didn't contain a body. +var ErrNoBody = errors.New("the response did not contain a body") + +// GetJSON reads the response body into a raw JSON object. +// It returns ErrNoBody if there was no content. +func GetJSON(resp *http.Response) (map[string]interface{}, error) { + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return nil, err + } + if len(body) == 0 { + return nil, ErrNoBody + } + // put the body back so it's available to others + resp.Body = ioutil.NopCloser(bytes.NewReader(body)) + // unmarshall the body to get the value + var jsonBody map[string]interface{} + if err = json.Unmarshal(body, &jsonBody); err != nil { + return nil, err + } + return jsonBody, nil +} + +const HeaderRetryAfter = "Retry-After" + +// RetryAfter returns non-zero if the response contains a Retry-After header value. +func RetryAfter(resp *http.Response) time.Duration { + if resp == nil { + return 0 + } + ra := resp.Header.Get(HeaderRetryAfter) + if ra == "" { + return 0 + } + // retry-after values are expressed in either number of + // seconds or an HTTP-date indicating when to try again + if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 { + return time.Duration(retryAfter) * time.Second + } else if t, err := time.Parse(time.RFC1123, ra); err == nil { + return time.Until(t) + } + return 0 +} + +// HasStatusCode returns true if the Response's status code is one of the specified values. +func HasStatusCode(resp *http.Response, statusCodes ...int) bool { + if resp == nil { + return false + } + for _, sc := range statusCodes { + if resp.StatusCode == sc { + return true + } + } + return false +} + +const defaultScope = "/.default" + +// EndpointToScope converts the provided URL endpoint to its default scope. +func EndpointToScope(endpoint string) string { + if endpoint[len(endpoint)-1] != '/' { + endpoint += "/" + } + return endpoint + defaultScope +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/section_writer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/section_writer.go new file mode 100644 index 000000000..d2e89f5b2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/section_writer.go @@ -0,0 +1,53 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT License. + +package azblob + +import ( + "errors" + "io" +) + +type sectionWriter struct { + count int64 + offset int64 + position int64 + writerAt io.WriterAt +} + +func newSectionWriter(c io.WriterAt, off int64, count int64) *sectionWriter { + return §ionWriter{ + count: count, + offset: off, + writerAt: c, + } +} + +func (c *sectionWriter) Write(p []byte) (int, error) { + remaining := c.count - c.position + + if remaining <= 0 { + return 0, errors.New("end of section reached") + } + + slice := p + + if int64(len(slice)) > remaining { + slice = slice[:remaining] + } + + n, err := c.writerAt.WriteAt(slice, c.offset+c.position) + c.position += int64(n) + if err != nil { + return n, err + } + + if len(p) > n { + return n, errors.New("not enough space for all bytes") + } + + return n, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/transfer_manager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/transfer_manager.go new file mode 100644 index 000000000..5c40e9bc2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/transfer_manager.go @@ -0,0 +1,154 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "fmt" + "sync" +) + +// TransferManager provides a buffer and thread pool manager for certain transfer options. +// It is undefined behavior if code outside this package call any of these methods. +type TransferManager interface { + // Get provides a buffer that will be used to read data into and write out to the stream. + // It is guaranteed by this package to not read or write beyond the size of the slice. + Get() []byte + + // Put may or may not put the buffer into underlying storage, depending on settings. + // The buffer must not be touched after this has been called. + Put(b []byte) // nolint + + // Run will use a goroutine pool entry to run a function. This blocks until a pool + // goroutine becomes available. + Run(func()) + + // Close shuts down all internal goroutines. This must be called when the TransferManager + // will no longer be used. Not closing it will cause a goroutine leak. + Close() +} + +// --------------------------------------------------------------------------------------------------------------------- + +type staticBuffer struct { + buffers chan []byte + size int + threadpool chan func() +} + +// NewStaticBuffer creates a TransferManager that will use a channel as a circular buffer +// that can hold "max" buffers of "size". The goroutine pool is also sized at max. This +// can be shared between calls if you wish to control maximum memory and concurrency with +// multiple concurrent calls. +func NewStaticBuffer(size, max int) (TransferManager, error) { + if size < 1 || max < 1 { + return nil, fmt.Errorf("cannot be called with size or max set to < 1") + } + + if size < _1MiB { + return nil, fmt.Errorf("cannot have size < 1MiB") + } + + threadpool := make(chan func(), max) + buffers := make(chan []byte, max) + for i := 0; i < max; i++ { + go func() { + for f := range threadpool { + f() + } + }() + + buffers <- make([]byte, size) + } + return staticBuffer{ + buffers: buffers, + size: size, + threadpool: threadpool, + }, nil +} + +// Get implements TransferManager.Get(). +func (s staticBuffer) Get() []byte { + return <-s.buffers +} + +// Put implements TransferManager.Put(). 
+func (s staticBuffer) Put(b []byte) { // nolint + select { + case s.buffers <- b: + default: // This shouldn't happen, but just in case they call Put() with there own buffer. + } +} + +// Run implements TransferManager.Run(). +func (s staticBuffer) Run(f func()) { + s.threadpool <- f +} + +// Close implements TransferManager.Close(). +func (s staticBuffer) Close() { + close(s.threadpool) + close(s.buffers) +} + +// --------------------------------------------------------------------------------------------------------------------- + +type syncPool struct { + threadpool chan func() + pool sync.Pool +} + +// NewSyncPool creates a TransferManager that will use a sync.Pool +// that can hold a non-capped number of buffers constrained by concurrency. This +// can be shared between calls if you wish to share memory and concurrency. +func NewSyncPool(size, concurrency int) (TransferManager, error) { + if size < 1 || concurrency < 1 { + return nil, fmt.Errorf("cannot be called with size or max set to < 1") + } + + if size < _1MiB { + return nil, fmt.Errorf("cannot have size < 1MiB") + } + + threadpool := make(chan func(), concurrency) + for i := 0; i < concurrency; i++ { + go func() { + for f := range threadpool { + f() + } + }() + } + + return &syncPool{ + threadpool: threadpool, + pool: sync.Pool{ + New: func() interface{} { + return make([]byte, size) + }, + }, + }, nil +} + +// Get implements TransferManager.Get(). +func (s *syncPool) Get() []byte { + return s.pool.Get().([]byte) +} + +// Put implements TransferManager.Put(). +// nolint +func (s *syncPool) Put(b []byte) { + s.pool.Put(b) +} + +// Run implements TransferManager.Run(). +func (s *syncPool) Run(f func()) { + s.threadpool <- f +} + +// Close implements TransferManager.Close(). +func (s *syncPool) Close() { + close(s.threadpool) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_access_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_access_policy.go new file mode 100644 index 000000000..612bc784c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_access_policy.go @@ -0,0 +1,67 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "bytes" + "fmt" +) + +// AccessPolicyPermission type simplifies creating the permissions string for a container's access policy. +// Initialize an instance of this type and then call its String method to set AccessPolicy's Permission field. +type AccessPolicyPermission struct { + Read, Add, Create, Write, Delete, List bool +} + +// String produces the access policy permission string for an Azure Storage container. +// Call this method to set AccessPolicy's Permission field. +func (p AccessPolicyPermission) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + if p.List { + b.WriteRune('l') + } + return b.String() +} + +// Parse initializes the AccessPolicyPermission's fields from a string. 
+func (p *AccessPolicyPermission) Parse(s string) error { + *p = AccessPolicyPermission{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'l': + p.List = true + default: + return fmt.Errorf("invalid permission: '%v'", r) + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_append_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_append_blob_client.go new file mode 100644 index 000000000..25490ab59 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_append_blob_client.go @@ -0,0 +1,154 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "context" + "io" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +// AppendBlobClient represents a client to an Azure Storage append blob; +type AppendBlobClient struct { + BlobClient + client *appendBlobClient +} + +// NewAppendBlobClient creates an AppendBlobClient with the specified URL, Azure AD credential, and options. +func NewAppendBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*AppendBlobClient, error) { + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil) + conOptions := getConnectionOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + conn := newConnection(blobURL, conOptions) + + return &AppendBlobClient{ + client: newAppendBlobClient(conn.Endpoint(), conn.Pipeline()), + BlobClient: BlobClient{ + client: newBlobClient(conn.Endpoint(), conn.Pipeline()), + }, + }, nil +} + +// NewAppendBlobClientWithNoCredential creates an AppendBlobClient with the specified URL and options. +func NewAppendBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*AppendBlobClient, error) { + conOptions := getConnectionOptions(options) + conn := newConnection(blobURL, conOptions) + + return &AppendBlobClient{ + client: newAppendBlobClient(conn.Endpoint(), conn.Pipeline()), + BlobClient: BlobClient{ + client: newBlobClient(conn.Endpoint(), conn.Pipeline()), + }, + }, nil +} + +// NewAppendBlobClientWithSharedKey creates an AppendBlobClient with the specified URL, shared key, and options. +func NewAppendBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*AppendBlobClient, error) { + authPolicy := newSharedKeyCredPolicy(cred) + conOptions := getConnectionOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + conn := newConnection(blobURL, conOptions) + + return &AppendBlobClient{ + client: newAppendBlobClient(conn.Endpoint(), conn.Pipeline()), + BlobClient: BlobClient{ + client: newBlobClient(conn.Endpoint(), conn.Pipeline()), + sharedKey: cred, + }, + }, nil +} + +// WithSnapshot creates a new AppendBlobURL object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. 
+func (ab *AppendBlobClient) WithSnapshot(snapshot string) (*AppendBlobClient, error) { + p, err := NewBlobURLParts(ab.URL()) + if err != nil { + return nil, err + } + + p.Snapshot = snapshot + endpoint := p.URL() + pipeline := ab.client.pl + + return &AppendBlobClient{ + client: newAppendBlobClient(endpoint, pipeline), + BlobClient: BlobClient{ + client: newBlobClient(endpoint, pipeline), + sharedKey: ab.sharedKey, + }, + }, nil +} + +// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. +// Pass "" to remove the versionID returning a URL to the base blob. +func (ab *AppendBlobClient) WithVersionID(versionID string) (*AppendBlobClient, error) { + p, err := NewBlobURLParts(ab.URL()) + if err != nil { + return nil, err + } + + p.VersionID = versionID + endpoint := p.URL() + pipeline := ab.client.pl + + return &AppendBlobClient{ + client: newAppendBlobClient(endpoint, pipeline), + BlobClient: BlobClient{ + client: newBlobClient(endpoint, pipeline), + sharedKey: ab.sharedKey, + }, + }, nil +} + +// Create creates a 0-size append blob. Call AppendBlock to append data to an append blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. +func (ab *AppendBlobClient) Create(ctx context.Context, options *AppendBlobCreateOptions) (AppendBlobCreateResponse, error) { + appendBlobAppendBlockOptions, blobHttpHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := options.format() + + resp, err := ab.client.Create(ctx, 0, appendBlobAppendBlockOptions, blobHttpHeaders, + leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + + return toAppendBlobCreateResponse(resp), handleError(err) +} + +// AppendBlock writes a stream to a new block of data to the end of the existing append blob. +// This method panics if the stream is not at position 0. +// Note that the http client closes the body stream after the request is sent to the service. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block. +func (ab *AppendBlobClient) AppendBlock(ctx context.Context, body io.ReadSeekCloser, options *AppendBlobAppendBlockOptions) (AppendBlobAppendBlockResponse, error) { + count, err := validateSeekableStreamAt0AndGetCount(body) + if err != nil { + return AppendBlobAppendBlockResponse{}, nil + } + + appendOptions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions, leaseAccessConditions := options.format() + + resp, err := ab.client.AppendBlock(ctx, count, body, appendOptions, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScope, modifiedAccessConditions) + + return toAppendBlobAppendBlockResponse(resp), handleError(err) +} + +// AppendBlockFromURL copies a new block of data from source URL to the end of the existing append blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/append-block-from-url. +func (ab *AppendBlobClient) AppendBlockFromURL(ctx context.Context, source string, o *AppendBlobAppendBlockFromURLOptions) (AppendBlobAppendBlockFromURLResponse, error) { + appendBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions := o.format() + + // content length should be 0 on * from URL. always. It's a 400 if it isn't. 
+ resp, err := ab.client.AppendBlockFromURL(ctx, source, 0, appendBlockFromURLOptions, cpkInfo, cpkScopeInfo, + leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + return toAppendBlobAppendBlockFromURLResponse(resp), handleError(err) +} + +// SealAppendBlob - The purpose of Append Blob Seal is to allow users and applications to seal append blobs, marking them as read only. +// https://docs.microsoft.com/en-us/rest/api/storageservices/append-blob-seal +func (ab *AppendBlobClient) SealAppendBlob(ctx context.Context, options *AppendBlobSealOptions) (AppendBlobSealResponse, error) { + leaseAccessConditions, modifiedAccessConditions, positionAccessConditions := options.format() + resp, err := ab.client.Seal(ctx, nil, leaseAccessConditions, modifiedAccessConditions, positionAccessConditions) + return toAppendBlobSealResponse(resp), handleError(err) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_client.go new file mode 100644 index 000000000..9543d14f8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_client.go @@ -0,0 +1,278 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "context" + "errors" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +// BlobClient represents a URL to an Azure Storage blob; the blob may be a block blob, append blob, or page blob. +type BlobClient struct { + client *blobClient + sharedKey *SharedKeyCredential +} + +// NewBlobClient creates a BlobClient object using the specified URL, Azure AD credential, and options. +func NewBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*BlobClient, error) { + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil) + conOptions := getConnectionOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + conn := newConnection(blobURL, conOptions) + + return &BlobClient{ + client: newBlobClient(conn.Endpoint(), conn.Pipeline()), + }, nil +} + +// NewBlobClientWithNoCredential creates a BlobClient object using the specified URL and options. +func NewBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*BlobClient, error) { + conOptions := getConnectionOptions(options) + conn := newConnection(blobURL, conOptions) + + return &BlobClient{ + client: newBlobClient(conn.Endpoint(), conn.Pipeline()), + }, nil +} + +// NewBlobClientWithSharedKey creates a BlobClient object using the specified URL, shared key, and options. 
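+//
+// Illustrative sketch, not part of the upstream SDK: building a BlobClient from a hypothetical
+// account name and key, then fetching the blob's properties. ctx is assumed to exist and the
+// placeholders must be replaced with real values; nil options select the defaults.
+//
+//	cred, err := NewSharedKeyCredential("<account>", "<base64-key>")
+//	if err != nil {
+//		return err
+//	}
+//	blobClient, err := NewBlobClientWithSharedKey("https://<account>.blob.core.windows.net/<container>/<blob>", cred, nil)
+//	if err != nil {
+//		return err
+//	}
+//	props, err := blobClient.GetProperties(ctx, nil)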
+func NewBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*BlobClient, error) { + authPolicy := newSharedKeyCredPolicy(cred) + conOptions := getConnectionOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + conn := newConnection(blobURL, conOptions) + + return &BlobClient{ + client: newBlobClient(blobURL, conn.Pipeline()), + sharedKey: cred, + }, nil +} + +// NewBlobClientFromConnectionString creates BlobClient from a connection String +//nolint +func NewBlobClientFromConnectionString(connectionString, containerName, blobName string, options *ClientOptions) (*BlobClient, error) { + containerClient, err := NewContainerClientFromConnectionString(connectionString, containerName, options) + if err != nil { + return nil, err + } + return containerClient.NewBlobClient(blobName) +} + +// URL returns the URL endpoint used by the BlobClient object. +func (b *BlobClient) URL() string { + return b.client.endpoint +} + +// WithSnapshot creates a new BlobClient object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. +func (b *BlobClient) WithSnapshot(snapshot string) (*BlobClient, error) { + p, err := NewBlobURLParts(b.URL()) + if err != nil { + return nil, err + } + p.Snapshot = snapshot + + pipeline := b.client.pl + return &BlobClient{ + client: newBlobClient(p.URL(), pipeline), + sharedKey: b.sharedKey, + }, nil +} + +// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. +// Pass "" to remove the versionID returning a URL to the base blob. +func (b *BlobClient) WithVersionID(versionID string) (*BlobClient, error) { + p, err := NewBlobURLParts(b.URL()) + if err != nil { + return nil, err + } + p.VersionID = versionID + + pipeline := b.client.pl + return &BlobClient{ + client: newBlobClient(p.URL(), pipeline), + sharedKey: b.sharedKey, + }, nil +} + +// Download reads a range of bytes from a blob. The response also includes the blob's properties and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. +func (b *BlobClient) Download(ctx context.Context, options *BlobDownloadOptions) (BlobDownloadResponse, error) { + o, lease, cpk, accessConditions := options.format() + dr, err := b.client.Download(ctx, o, lease, cpk, accessConditions) + if err != nil { + return BlobDownloadResponse{}, handleError(err) + } + + offset := int64(0) + count := int64(CountToEnd) + + if options != nil && options.Offset != nil { + offset = *options.Offset + } + + if options != nil && options.Count != nil { + count = *options.Count + } + + eTag := "" + if dr.ETag != nil { + eTag = *dr.ETag + } + return BlobDownloadResponse{ + b: b, + blobClientDownloadResponse: dr, + ctx: ctx, + getInfo: HTTPGetterInfo{Offset: offset, Count: count, ETag: eTag}, + ObjectReplicationRules: deserializeORSPolicies(dr.ObjectReplicationRules), + }, err +} + +// Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. +// Note that deleting a blob also deletes all its snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-blob. 
+func (b *BlobClient) Delete(ctx context.Context, o *BlobDeleteOptions) (BlobDeleteResponse, error) { + basics, leaseInfo, accessConditions := o.format() + resp, err := b.client.Delete(ctx, basics, leaseInfo, accessConditions) + + return toBlobDeleteResponse(resp), handleError(err) +} + +// Undelete restores the contents and metadata of a soft-deleted blob and any associated soft-deleted snapshots. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/undelete-blob. +func (b *BlobClient) Undelete(ctx context.Context, o *BlobUndeleteOptions) (BlobUndeleteResponse, error) { + undeleteOptions := o.format() + resp, err := b.client.Undelete(ctx, undeleteOptions) + + return toBlobUndeleteResponse(resp), handleError(err) +} + +// SetTier operation sets the tier on a blob. The operation is allowed on a page +// blob in a premium storage account and on a block blob in a blob storage account (locally +// redundant storage only). A premium page blob's tier determines the allowed size, IOPS, and +// bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive storage type. This operation +// does not update the blob's ETag. +// For detailed information about block blob level tiering see https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blob-storage-tiers. +func (b *BlobClient) SetTier(ctx context.Context, tier AccessTier, options *BlobSetTierOptions) (BlobSetTierResponse, error) { + basics, lease, accessConditions := options.format() + resp, err := b.client.SetTier(ctx, tier, basics, lease, accessConditions) + + return toBlobSetTierResponse(resp), handleError(err) +} + +// GetProperties returns the blob's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob-properties. +func (b *BlobClient) GetProperties(ctx context.Context, options *BlobGetPropertiesOptions) (BlobGetPropertiesResponse, error) { + basics, lease, cpk, access := options.format() + resp, err := b.client.GetProperties(ctx, basics, lease, cpk, access) + + return toGetBlobPropertiesResponse(resp), handleError(err) +} + +// SetHTTPHeaders changes a blob's HTTP headers. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (b *BlobClient) SetHTTPHeaders(ctx context.Context, blobHttpHeaders BlobHTTPHeaders, options *BlobSetHTTPHeadersOptions) (BlobSetHTTPHeadersResponse, error) { + basics, lease, access := options.format() + resp, err := b.client.SetHTTPHeaders(ctx, basics, &blobHttpHeaders, lease, access) + + return toBlobSetHTTPHeadersResponse(resp), handleError(err) +} + +// SetMetadata changes a blob's metadata. +// https://docs.microsoft.com/rest/api/storageservices/set-blob-metadata. +func (b *BlobClient) SetMetadata(ctx context.Context, metadata map[string]string, options *BlobSetMetadataOptions) (BlobSetMetadataResponse, error) { + basics := blobClientSetMetadataOptions{ + Metadata: metadata, + } + lease, cpk, cpkScope, access := options.format() + resp, err := b.client.SetMetadata(ctx, &basics, lease, cpk, cpkScope, access) + + return toBlobSetMetadataResponse(resp), handleError(err) +} + +// CreateSnapshot creates a read-only snapshot of a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/snapshot-blob. 
+func (b *BlobClient) CreateSnapshot(ctx context.Context, options *BlobCreateSnapshotOptions) (BlobCreateSnapshotResponse, error) { + // CreateSnapshot does NOT panic if the user tries to create a snapshot using a URL that already has a snapshot query parameter + // because checking this would be a performance hit for a VERY unusual path, and we don't think the common case should suffer this + // performance hit. + basics, cpk, cpkScope, access, lease := options.format() + resp, err := b.client.CreateSnapshot(ctx, basics, cpk, cpkScope, access, lease) + + return toBlobCreateSnapshotResponse(resp), handleError(err) +} + +// StartCopyFromURL copies the data at the source URL to a blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/copy-blob. +func (b *BlobClient) StartCopyFromURL(ctx context.Context, copySource string, options *BlobStartCopyOptions) (BlobStartCopyFromURLResponse, error) { + basics, srcAccess, destAccess, lease := options.format() + resp, err := b.client.StartCopyFromURL(ctx, copySource, basics, srcAccess, destAccess, lease) + + return toBlobStartCopyFromURLResponse(resp), handleError(err) +} + +// AbortCopyFromURL stops a pending copy that was previously started and leaves a destination blob with 0 length and metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/abort-copy-blob. +func (b *BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, options *BlobAbortCopyOptions) (BlobAbortCopyFromURLResponse, error) { + basics, lease := options.format() + resp, err := b.client.AbortCopyFromURL(ctx, copyID, basics, lease) + + return toBlobAbortCopyFromURLResponse(resp), handleError(err) +} + +// SetTags operation enables users to set tags on a blob or specific blob version, but not snapshot. +// Each call to this operation replaces all existing tags attached to the blob. +// To remove all tags from the blob, call this operation with no tags set. +// https://docs.microsoft.com/en-us/rest/api/storageservices/set-blob-tags +func (b *BlobClient) SetTags(ctx context.Context, options *BlobSetTagsOptions) (BlobSetTagsResponse, error) { + blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format() + resp, err := b.client.SetTags(ctx, blobSetTagsOptions, modifiedAccessConditions, leaseAccessConditions) + + return toBlobSetTagsResponse(resp), handleError(err) +} + +// GetTags operation enables users to get tags on a blob or specific blob version, or snapshot. +// https://docs.microsoft.com/en-us/rest/api/storageservices/get-blob-tags +func (b *BlobClient) GetTags(ctx context.Context, options *BlobGetTagsOptions) (BlobGetTagsResponse, error) { + blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions := options.format() + resp, err := b.client.GetTags(ctx, blobGetTagsOptions, modifiedAccessConditions, leaseAccessConditions) + + return toBlobGetTagsResponse(resp), handleError(err) + +} + +// GetSASToken is a convenience method for generating a SAS token for the currently pointed at blob. +// It can only be used if the credential supplied during creation was a SharedKeyCredential. +func (b *BlobClient) GetSASToken(permissions BlobSASPermissions, start time.Time, expiry time.Time) (SASQueryParameters, error) { + urlParts, _ := NewBlobURLParts(b.URL()) + + t, err := time.Parse(SnapshotTimeFormat, urlParts.Snapshot) + + if err != nil { + t = time.Time{} + } + + if b.sharedKey == nil { + return SASQueryParameters{}, errors.New("credential is not a SharedKeyCredential. 
SAS can only be signed with a SharedKeyCredential") + } + + return BlobSASSignatureValues{ + ContainerName: urlParts.ContainerName, + BlobName: urlParts.BlobName, + SnapshotTime: t, + Version: SASVersion, + + Permissions: permissions.String(), + + StartTime: start.UTC(), + ExpiryTime: expiry.UTC(), + }.NewSASQueryParameters(b.sharedKey) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_lease_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_lease_client.go new file mode 100644 index 000000000..a9273dfb6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_blob_lease_client.go @@ -0,0 +1,98 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" +) + +// BlobLeaseClient represents lease client on blob +type BlobLeaseClient struct { + BlobClient + leaseID *string +} + +// NewBlobLeaseClient is constructor for BlobLeaseClient +func (b *BlobClient) NewBlobLeaseClient(leaseID *string) (*BlobLeaseClient, error) { + if leaseID == nil { + generatedUuid, err := uuid.New() + if err != nil { + return nil, err + } + leaseID = to.Ptr(generatedUuid.String()) + } + return &BlobLeaseClient{ + BlobClient: *b, + leaseID: leaseID, + }, nil +} + +// AcquireLease acquires a lease on the blob for write and delete operations. +//The lease Duration must be between 15 and 60 seconds, or infinite (-1). +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. +func (blc *BlobLeaseClient) AcquireLease(ctx context.Context, options *BlobAcquireLeaseOptions) (BlobAcquireLeaseResponse, error) { + blobAcquireLeaseOptions, modifiedAccessConditions := options.format() + blobAcquireLeaseOptions.ProposedLeaseID = blc.leaseID + + resp, err := blc.client.AcquireLease(ctx, &blobAcquireLeaseOptions, modifiedAccessConditions) + return toBlobAcquireLeaseResponse(resp), handleError(err) +} + +// BreakLease breaks the blob's previously-acquired lease (if it exists). Pass the LeaseBreakDefault (-1) +// constant to break a fixed-Duration lease when it expires or an infinite lease immediately. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. +func (blc *BlobLeaseClient) BreakLease(ctx context.Context, options *BlobBreakLeaseOptions) (BlobBreakLeaseResponse, error) { + blobBreakLeaseOptions, modifiedAccessConditions := options.format() + resp, err := blc.client.BreakLease(ctx, blobBreakLeaseOptions, modifiedAccessConditions) + return toBlobBreakLeaseResponse(resp), handleError(err) +} + +// ChangeLease changes the blob's lease ID. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. 
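+//
+// Illustrative sketch, not part of the upstream SDK: a typical lease lifecycle. blobClient and ctx
+// are assumed to exist; the acquire/release options are left nil here for brevity, although in
+// practice the acquire options carry the lease duration (15-60 seconds, or -1 for infinite).
+//
+//	leaseClient, err := blobClient.NewBlobLeaseClient(nil) // nil lease ID: a new UUID is generated
+//	if err != nil {
+//		return err
+//	}
+//	_, err = leaseClient.AcquireLease(ctx, nil)
+//	// ... write to or delete the blob while holding the lease ...
+//	_, err = leaseClient.ReleaseLease(ctx, nil)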
+func (blc *BlobLeaseClient) ChangeLease(ctx context.Context, options *BlobChangeLeaseOptions) (BlobChangeLeaseResponse, error) { + if blc.leaseID == nil { + return BlobChangeLeaseResponse{}, errors.New("leaseID cannot be nil") + } + proposedLeaseID, changeLeaseOptions, modifiedAccessConditions, err := options.format() + if err != nil { + return BlobChangeLeaseResponse{}, err + } + resp, err := blc.client.ChangeLease(ctx, *blc.leaseID, *proposedLeaseID, changeLeaseOptions, modifiedAccessConditions) + + // If lease has been changed successfully, set the leaseID in client + if err == nil { + blc.leaseID = proposedLeaseID + } + + return toBlobChangeLeaseResponse(resp), handleError(err) +} + +// RenewLease renews the blob's previously-acquired lease. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. +func (blc *BlobLeaseClient) RenewLease(ctx context.Context, options *BlobRenewLeaseOptions) (BlobRenewLeaseResponse, error) { + if blc.leaseID == nil { + return BlobRenewLeaseResponse{}, errors.New("leaseID cannot be nil") + } + renewLeaseBlobOptions, modifiedAccessConditions := options.format() + resp, err := blc.client.RenewLease(ctx, *blc.leaseID, renewLeaseBlobOptions, modifiedAccessConditions) + return toBlobRenewLeaseResponse(resp), handleError(err) +} + +// ReleaseLease releases the blob's previously-acquired lease. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-blob. +func (blc *BlobLeaseClient) ReleaseLease(ctx context.Context, options *ReleaseLeaseBlobOptions) (BlobReleaseLeaseResponse, error) { + if blc.leaseID == nil { + return BlobReleaseLeaseResponse{}, errors.New("leaseID cannot be nil") + } + renewLeaseBlobOptions, modifiedAccessConditions := options.format() + resp, err := blc.client.ReleaseLease(ctx, *blc.leaseID, renewLeaseBlobOptions, modifiedAccessConditions) + return toBlobReleaseLeaseResponse(resp), handleError(err) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_block_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_block_blob_client.go new file mode 100644 index 000000000..b080128c8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_block_blob_client.go @@ -0,0 +1,201 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "context" + "io" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" +) + +// BlockBlobClient defines a set of operations applicable to block blobs. +type BlockBlobClient struct { + BlobClient + client *blockBlobClient +} + +// NewBlockBlobClient creates a BlockBlobClient object using the specified URL, Azure AD credential, and options. 
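+//
+// Illustrative sketch, not part of the upstream SDK: cred is assumed to be an azcore.TokenCredential,
+// for example one produced by the separate azidentity module (which is not vendored in this patch),
+// and the URL placeholders must be replaced with real values.
+//
+//	blockClient, err := NewBlockBlobClient("https://<account>.blob.core.windows.net/<container>/<blob>", cred, nil)
+//	if err != nil {
+//		return err
+//	}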
+func NewBlockBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*BlockBlobClient, error) { + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil) + conOptions := getConnectionOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + conn := newConnection(blobURL, conOptions) + + bClient := newBlobClient(conn.Endpoint(), conn.Pipeline()) + return &BlockBlobClient{ + client: newBlockBlobClient(bClient.endpoint, bClient.pl), + BlobClient: BlobClient{ + client: bClient, + }, + }, nil +} + +// NewBlockBlobClientWithNoCredential creates a BlockBlobClient object using the specified URL and options. +func NewBlockBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*BlockBlobClient, error) { + conOptions := getConnectionOptions(options) + conn := newConnection(blobURL, conOptions) + + bClient := newBlobClient(conn.Endpoint(), conn.Pipeline()) + return &BlockBlobClient{ + client: newBlockBlobClient(bClient.endpoint, bClient.pl), + BlobClient: BlobClient{ + client: bClient, + }, + }, nil +} + +// NewBlockBlobClientWithSharedKey creates a BlockBlobClient object using the specified URL, shared key, and options. +func NewBlockBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*BlockBlobClient, error) { + authPolicy := newSharedKeyCredPolicy(cred) + conOptions := getConnectionOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + conn := newConnection(blobURL, conOptions) + + bClient := newBlobClient(conn.Endpoint(), conn.Pipeline()) + return &BlockBlobClient{ + client: newBlockBlobClient(bClient.endpoint, bClient.pl), + BlobClient: BlobClient{ + client: bClient, + sharedKey: cred, + }, + }, nil +} + +// WithSnapshot creates a new BlockBlobClient object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. +func (bb *BlockBlobClient) WithSnapshot(snapshot string) (*BlockBlobClient, error) { + p, err := NewBlobURLParts(bb.URL()) + if err != nil { + return nil, err + } + + p.Snapshot = snapshot + endpoint := p.URL() + bClient := newBlobClient(endpoint, bb.client.pl) + + return &BlockBlobClient{ + client: newBlockBlobClient(bClient.endpoint, bClient.pl), + BlobClient: BlobClient{ + client: bClient, + sharedKey: bb.sharedKey, + }, + }, nil +} + +// WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. +// Pass "" to remove the versionID returning a URL to the base blob. +func (bb *BlockBlobClient) WithVersionID(versionID string) (*BlockBlobClient, error) { + p, err := NewBlobURLParts(bb.URL()) + if err != nil { + return nil, err + } + + p.VersionID = versionID + endpoint := p.URL() + bClient := newBlobClient(endpoint, bb.client.pl) + + return &BlockBlobClient{ + client: newBlockBlobClient(bClient.endpoint, bClient.pl), + BlobClient: BlobClient{ + client: bClient, + sharedKey: bb.sharedKey, + }, + }, nil +} + +// Upload creates a new block blob or overwrites an existing block blob. +// Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not +// supported with Upload; the content of the existing blob is overwritten with the new content. To +// perform a partial update of a block blob, use StageBlock and CommitBlockList. +// This method panics if the stream is not at position 0. 
+// Note that the http client closes the body stream after the request is sent to the service.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob.
+func (bb *BlockBlobClient) Upload(ctx context.Context, body io.ReadSeekCloser, options *BlockBlobUploadOptions) (BlockBlobUploadResponse, error) {
+	count, err := validateSeekableStreamAt0AndGetCount(body)
+	if err != nil {
+		return BlockBlobUploadResponse{}, err
+	}
+
+	basics, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions := options.format()
+
+	resp, err := bb.client.Upload(ctx, count, body, basics, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions)
+
+	return toBlockBlobUploadResponse(resp), handleError(err)
+}
+
+// StageBlock uploads the specified block to the block blob's "staging area" to be later committed by a call to CommitBlockList.
+// Note that the http client closes the body stream after the request is sent to the service.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block.
+func (bb *BlockBlobClient) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeekCloser,
+	options *BlockBlobStageBlockOptions) (BlockBlobStageBlockResponse, error) {
+	count, err := validateSeekableStreamAt0AndGetCount(body)
+	if err != nil {
+		return BlockBlobStageBlockResponse{}, err
+	}
+
+	stageBlockOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo := options.format()
+	resp, err := bb.client.StageBlock(ctx, base64BlockID, count, body, stageBlockOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo)
+
+	return toBlockBlobStageBlockResponse(resp), handleError(err)
+}
+
+// StageBlockFromURL copies the specified block from a source URL to the block blob's "staging area" to be later committed by a call to CommitBlockList.
+// If count is CountToEnd (0), then data is read from the specified offset to the end.
+// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/put-block-from-url.
+func (bb *BlockBlobClient) StageBlockFromURL(ctx context.Context, base64BlockID string, sourceURL string,
+	contentLength int64, options *BlockBlobStageBlockFromURLOptions) (BlockBlobStageBlockFromURLResponse, error) {
+
+	stageBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions := options.format()
+
+	resp, err := bb.client.StageBlockFromURL(ctx, base64BlockID, contentLength, sourceURL, stageBlockFromURLOptions,
+		cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions)
+
+	return toBlockBlobStageBlockFromURLResponse(resp), handleError(err)
+}
+
+// CommitBlockList writes a blob by specifying the list of block IDs that make up the blob.
+// In order to be written as part of a blob, a block must have been successfully written
+// to the server in a prior PutBlock operation. You can call PutBlockList to update a blob
+// by uploading only those blocks that have changed, then committing the new and existing
+// blocks together. Any blocks not specified in the block list are permanently deleted.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-block-list.
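+//
+// Illustrative sketch, not part of the upstream SDK: the two-phase upload of a block blob. bbClient
+// and ctx are assumed to exist, the file name and block ID are placeholders, and the encoding/base64
+// and os imports are assumed; os.Open is used only because *os.File satisfies io.ReadSeekCloser.
+//
+//	f, err := os.Open("chunk-0000.bin")
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()
+//	blockID := base64.StdEncoding.EncodeToString([]byte("block-0000")) // must be unique within the blob
+//	if _, err := bbClient.StageBlock(ctx, blockID, f, nil); err != nil {
+//		return err
+//	}
+//	_, err = bbClient.CommitBlockList(ctx, []string{blockID}, nil)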
+func (bb *BlockBlobClient) CommitBlockList(ctx context.Context, base64BlockIDs []string, options *BlockBlobCommitBlockListOptions) (BlockBlobCommitBlockListResponse, error) { + // this is a code smell in the generated code + blockIds := make([]*string, len(base64BlockIDs)) + for k, v := range base64BlockIDs { + blockIds[k] = to.Ptr(v) + } + + blockLookupList := BlockLookupList{Latest: blockIds} + commitOptions, headers, leaseAccess, cpkInfo, cpkScope, modifiedAccess := options.format() + + resp, err := bb.client.CommitBlockList(ctx, blockLookupList, commitOptions, headers, leaseAccess, cpkInfo, cpkScope, modifiedAccess) + + return toBlockBlobCommitBlockListResponse(resp), handleError(err) +} + +// GetBlockList returns the list of blocks that have been uploaded as part of a block blob using the specified block list filter. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-block-list. +func (bb *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, options *BlockBlobGetBlockListOptions) (BlockBlobGetBlockListResponse, error) { + o, lac, mac := options.format() + + resp, err := bb.client.GetBlockList(ctx, listType, o, lac, mac) + + return toBlockBlobGetBlockListResponse(resp), handleError(err) +} + +// CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. +// For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. +func (bb *BlockBlobClient) CopyFromURL(ctx context.Context, source string, options *BlockBlobCopyFromURLOptions) (BlockBlobCopyFromURLResponse, error) { + copyOptions, smac, mac, lac := options.format() + resp, err := bb.BlobClient.client.CopyFromURL(ctx, source, copyOptions, smac, mac, lac) + + return toBlockBlobCopyFromURLResponse(resp), handleError(err) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_connection_string.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_connection_string.go new file mode 100644 index 000000000..2c23b8f4e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_connection_string.go @@ -0,0 +1,88 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "errors" + "fmt" + "strings" +) + +var errConnectionString = errors.New("connection string is either blank or malformed. The expected connection string " + + "should contain key value pairs separated by semicolons. For example 'DefaultEndpointsProtocol=https;AccountName=;" + + "AccountKey=;EndpointSuffix=core.windows.net'") + +// convertConnStrToMap converts a connection string (in format key1=value1;key2=value2;key3=value3;) into a map of key-value pairs +func convertConnStrToMap(connStr string) (map[string]string, error) { + ret := make(map[string]string) + connStr = strings.TrimRight(connStr, ";") + + splitString := strings.Split(connStr, ";") + if len(splitString) == 0 { + return ret, errConnectionString + } + for _, stringPart := range splitString { + parts := strings.SplitN(stringPart, "=", 2) + if len(parts) != 2 { + return ret, errConnectionString + } + ret[parts[0]] = parts[1] + } + return ret, nil +} + +// parseConnectionString parses a connection string into a service URL and a SharedKeyCredential or a service url with the +// SharedAccessSignature combined. 
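+//
+// Illustrative sketch, not part of the upstream SDK: given a hypothetical connection string such as
+//
+//	DefaultEndpointsProtocol=https;AccountName=myaccount;AccountKey=<base64-key>;EndpointSuffix=core.windows.net
+//
+// this function returns the service URL "https://myaccount.blob.core.windows.net" together with a
+// *SharedKeyCredential built from AccountName/AccountKey. If only SharedAccessSignature is present,
+// the SAS is appended to the returned URL as a query string and the returned credential is nil.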
+func parseConnectionString(connectionString string) (string, *SharedKeyCredential, error) { + var serviceURL string + var cred *SharedKeyCredential + + defaultScheme := "https" + defaultSuffix := "core.windows.net" + + connStrMap, err := convertConnStrToMap(connectionString) + if err != nil { + return "", nil, err + } + + accountName, ok := connStrMap["AccountName"] + if !ok { + return "", nil, errConnectionString + } + accountKey, ok := connStrMap["AccountKey"] + if !ok { + sharedAccessSignature, ok := connStrMap["SharedAccessSignature"] + if !ok { + return "", nil, errConnectionString + } + return fmt.Sprintf("%v://%v.blob.%v/?%v", defaultScheme, accountName, defaultSuffix, sharedAccessSignature), nil, nil + } + + protocol, ok := connStrMap["DefaultEndpointsProtocol"] + if !ok { + protocol = defaultScheme + } + + suffix, ok := connStrMap["EndpointSuffix"] + if !ok { + suffix = defaultSuffix + } + + blobEndpoint, ok := connStrMap["BlobEndpoint"] + if ok { + cred, err = NewSharedKeyCredential(accountName, accountKey) + return blobEndpoint, cred, err + } + serviceURL = fmt.Sprintf("%v://%v.blob.%v", protocol, accountName, suffix) + + cred, err = NewSharedKeyCredential(accountName, accountKey) + if err != nil { + return "", nil, err + } + + return serviceURL, cred, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_client.go new file mode 100644 index 000000000..12c4a18df --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_client.go @@ -0,0 +1,253 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "context" + "errors" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +// ContainerClient represents a URL to the Azure Storage container allowing you to manipulate its blobs. +type ContainerClient struct { + client *containerClient + sharedKey *SharedKeyCredential +} + +// URL returns the URL endpoint used by the ContainerClient object. +func (c *ContainerClient) URL() string { + return c.client.endpoint +} + +// NewContainerClient creates a ContainerClient object using the specified URL, Azure AD credential, and options. +func NewContainerClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*ContainerClient, error) { + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil) + conOptions := getConnectionOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + conn := newConnection(containerURL, conOptions) + + return &ContainerClient{ + client: newContainerClient(conn.Endpoint(), conn.Pipeline()), + }, nil +} + +// NewContainerClientWithNoCredential creates a ContainerClient object using the specified URL and options. +func NewContainerClientWithNoCredential(containerURL string, options *ClientOptions) (*ContainerClient, error) { + conOptions := getConnectionOptions(options) + conn := newConnection(containerURL, conOptions) + + return &ContainerClient{ + client: newContainerClient(conn.Endpoint(), conn.Pipeline()), + }, nil +} + +// NewContainerClientWithSharedKey creates a ContainerClient object using the specified URL, shared key, and options. 
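+//
+// Illustrative sketch, not part of the upstream SDK: creating a container with a shared key
+// credential. ctx is assumed to exist, the placeholders must be replaced with real values, and
+// nil options select the defaults.
+//
+//	cred, err := NewSharedKeyCredential("<account>", "<base64-key>")
+//	if err != nil {
+//		return err
+//	}
+//	containerClient, err := NewContainerClientWithSharedKey("https://<account>.blob.core.windows.net/<container>", cred, nil)
+//	if err != nil {
+//		return err
+//	}
+//	_, err = containerClient.Create(ctx, nil)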
+func NewContainerClientWithSharedKey(containerURL string, cred *SharedKeyCredential, options *ClientOptions) (*ContainerClient, error) { + authPolicy := newSharedKeyCredPolicy(cred) + conOptions := getConnectionOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + conn := newConnection(containerURL, conOptions) + + return &ContainerClient{ + client: newContainerClient(conn.Endpoint(), conn.Pipeline()), + sharedKey: cred, + }, nil +} + +// NewContainerClientFromConnectionString creates a ContainerClient object using connection string of an account +func NewContainerClientFromConnectionString(connectionString string, containerName string, options *ClientOptions) (*ContainerClient, error) { + svcClient, err := NewServiceClientFromConnectionString(connectionString, options) + if err != nil { + return nil, err + } + return svcClient.NewContainerClient(containerName) +} + +// NewBlobClient creates a new BlobClient object by concatenating blobName to the end of +// ContainerClient's URL. The new BlobClient uses the same request policy pipeline as the ContainerClient. +// To change the pipeline, create the BlobClient and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewBlobClient instead of calling this object's +// NewBlobClient method. +func (c *ContainerClient) NewBlobClient(blobName string) (*BlobClient, error) { + blobURL := appendToURLPath(c.URL(), blobName) + + return &BlobClient{ + client: newBlobClient(blobURL, c.client.pl), + sharedKey: c.sharedKey, + }, nil +} + +// NewAppendBlobClient creates a new AppendBlobURL object by concatenating blobName to the end of +// ContainerClient's URL. The new AppendBlobURL uses the same request policy pipeline as the ContainerClient. +// To change the pipeline, create the AppendBlobURL and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewAppendBlobClient instead of calling this object's +// NewAppendBlobClient method. +func (c *ContainerClient) NewAppendBlobClient(blobName string) (*AppendBlobClient, error) { + blobURL := appendToURLPath(c.URL(), blobName) + + return &AppendBlobClient{ + BlobClient: BlobClient{ + client: newBlobClient(blobURL, c.client.pl), + sharedKey: c.sharedKey, + }, + client: newAppendBlobClient(blobURL, c.client.pl), + }, nil +} + +// NewBlockBlobClient creates a new BlockBlobClient object by concatenating blobName to the end of +// ContainerClient's URL. The new BlockBlobClient uses the same request policy pipeline as the ContainerClient. +// To change the pipeline, create the BlockBlobClient and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewBlockBlobClient instead of calling this object's +// NewBlockBlobClient method. +func (c *ContainerClient) NewBlockBlobClient(blobName string) (*BlockBlobClient, error) { + blobURL := appendToURLPath(c.URL(), blobName) + + return &BlockBlobClient{ + BlobClient: BlobClient{ + client: newBlobClient(blobURL, c.client.pl), + sharedKey: c.sharedKey, + }, + client: newBlockBlobClient(blobURL, c.client.pl), + }, nil +} + +// NewPageBlobClient creates a new PageBlobURL object by concatenating blobName to the end of ContainerClient's URL. The new PageBlobURL uses the same request policy pipeline as the ContainerClient. +// To change the pipeline, create the PageBlobURL and then call its WithPipeline method passing in the +// desired pipeline object. 
Or, call this package's NewPageBlobClient instead of calling this object's +// NewPageBlobClient method. +func (c *ContainerClient) NewPageBlobClient(blobName string) (*PageBlobClient, error) { + blobURL := appendToURLPath(c.URL(), blobName) + + return &PageBlobClient{ + BlobClient: BlobClient{ + client: newBlobClient(blobURL, c.client.pl), + sharedKey: c.sharedKey, + }, + client: newPageBlobClient(blobURL, c.client.pl), + }, nil +} + +// Create creates a new container within a storage account. If a container with the same name already exists, the operation fails. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/create-container. +func (c *ContainerClient) Create(ctx context.Context, options *ContainerCreateOptions) (ContainerCreateResponse, error) { + basics, cpkInfo := options.format() + resp, err := c.client.Create(ctx, basics, cpkInfo) + + return toContainerCreateResponse(resp), handleError(err) +} + +// Delete marks the specified container for deletion. The container and any blobs contained within it are later deleted during garbage collection. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/delete-container. +func (c *ContainerClient) Delete(ctx context.Context, o *ContainerDeleteOptions) (ContainerDeleteResponse, error) { + basics, leaseInfo, accessConditions := o.format() + resp, err := c.client.Delete(ctx, basics, leaseInfo, accessConditions) + + return toContainerDeleteResponse(resp), handleError(err) +} + +// GetProperties returns the container's properties. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-metadata. +func (c *ContainerClient) GetProperties(ctx context.Context, o *ContainerGetPropertiesOptions) (ContainerGetPropertiesResponse, error) { + // NOTE: GetMetadata actually calls GetProperties internally because GetProperties returns the metadata AND the properties. + // This allows us to not expose a GetProperties method at all simplifying the API. + // The optionals are nil, like they were in track 1.5 + options, leaseAccess := o.format() + resp, err := c.client.GetProperties(ctx, options, leaseAccess) + + return toContainerGetPropertiesResponse(resp), handleError(err) +} + +// SetMetadata sets the container's metadata. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-metadata. +func (c *ContainerClient) SetMetadata(ctx context.Context, o *ContainerSetMetadataOptions) (ContainerSetMetadataResponse, error) { + metadataOptions, lac, mac := o.format() + resp, err := c.client.SetMetadata(ctx, metadataOptions, lac, mac) + + return toContainerSetMetadataResponse(resp), handleError(err) +} + +// GetAccessPolicy returns the container's access policy. The access policy indicates whether container's blobs may be accessed publicly. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-container-acl. +func (c *ContainerClient) GetAccessPolicy(ctx context.Context, o *ContainerGetAccessPolicyOptions) (ContainerGetAccessPolicyResponse, error) { + options, ac := o.format() + resp, err := c.client.GetAccessPolicy(ctx, options, ac) + + return toContainerGetAccessPolicyResponse(resp), handleError(err) +} + +// SetAccessPolicy sets the container's permissions. The access policy indicates whether blobs in a container may be accessed publicly. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-container-acl. 
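+//
+// Illustrative sketch, not part of the upstream SDK: AccessPolicyPermission (defined in this package)
+// can be used to validate and assemble the permission string for a stored access policy; the options
+// struct that ultimately carries the policy is not shown here, since its fields live outside this file.
+//
+//	var perms AccessPolicyPermission
+//	if err := perms.Parse("racwdl"); err != nil { // read, add, create, write, delete, list
+//		return err
+//	}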
+func (c *ContainerClient) SetAccessPolicy(ctx context.Context, o *ContainerSetAccessPolicyOptions) (ContainerSetAccessPolicyResponse, error) {
+	accessPolicy, mac, lac := o.format()
+	resp, err := c.client.SetAccessPolicy(ctx, accessPolicy, mac, lac)
+
+	return toContainerSetAccessPolicyResponse(resp), handleError(err)
+}
+
+// ListBlobsFlat returns a pager for blobs starting from the specified Marker. Use an empty
+// Marker to start enumeration from the beginning. Blob names are returned in lexicographic order.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
+func (c *ContainerClient) ListBlobsFlat(o *ContainerListBlobsFlatOptions) *ContainerListBlobFlatPager {
+	listOptions := o.format()
+	pager := c.client.ListBlobFlatSegment(listOptions)
+
+	// override the advancer
+	pager.advancer = func(ctx context.Context, response containerClientListBlobFlatSegmentResponse) (*policy.Request, error) {
+		listOptions.Marker = response.NextMarker
+		return c.client.listBlobFlatSegmentCreateRequest(ctx, listOptions)
+	}
+
+	return toContainerListBlobFlatSegmentPager(pager)
+}
+
+// ListBlobsHierarchy returns a pager for blobs starting from the specified Marker, grouping results by the
+// given delimiter. Use an empty Marker to start enumeration from the beginning. Blob names are returned in
+// lexicographic order. After processing a segment, advance the pager to fetch the next one; the pager passes
+// the previously-returned Marker automatically.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-blobs.
+func (c *ContainerClient) ListBlobsHierarchy(delimiter string, o *ContainerListBlobsHierarchyOptions) *ContainerListBlobHierarchyPager {
+	listOptions := o.format()
+	pager := c.client.ListBlobHierarchySegment(delimiter, listOptions)
+
+	// override the advancer
+	pager.advancer = func(ctx context.Context, response containerClientListBlobHierarchySegmentResponse) (*policy.Request, error) {
+		listOptions.Marker = response.NextMarker
+		return c.client.listBlobHierarchySegmentCreateRequest(ctx, delimiter, listOptions)
+	}
+
+	return toContainerListBlobHierarchySegmentPager(pager)
+}
+
+// GetSASURL is a convenience method for generating a SAS token for the currently pointed at container.
+// It can only be used if the credential supplied during creation was a SharedKeyCredential.
+func (c *ContainerClient) GetSASURL(permissions ContainerSASPermissions, start time.Time, expiry time.Time) (string, error) {
+	if c.sharedKey == nil {
+		return "", errors.New("SAS can only be signed with a SharedKeyCredential")
+	}
+
+	urlParts, err := NewBlobURLParts(c.URL())
+	if err != nil {
+		return "", err
+	}
+
+	// Containers do not have snapshots, nor versions.
+ urlParts.SAS, err = BlobSASSignatureValues{ + ContainerName: urlParts.ContainerName, + Permissions: permissions.String(), + StartTime: start.UTC(), + ExpiryTime: expiry.UTC(), + }.NewSASQueryParameters(c.sharedKey) + + return urlParts.URL(), err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_lease_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_lease_client.go new file mode 100644 index 000000000..395a72a89 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_container_lease_client.go @@ -0,0 +1,102 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "context" + "errors" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" +) + +//ContainerLeaseClient represents lease client of container +type ContainerLeaseClient struct { + ContainerClient + leaseID *string +} + +// NewContainerLeaseClient is constructor of ContainerLeaseClient +func (c *ContainerClient) NewContainerLeaseClient(leaseID *string) (*ContainerLeaseClient, error) { + if leaseID == nil { + generatedUuid, err := uuid.New() + if err != nil { + return nil, err + } + leaseID = to.Ptr(generatedUuid.String()) + } + return &ContainerLeaseClient{ + ContainerClient: *c, + leaseID: leaseID, + }, nil +} + +// AcquireLease acquires a lease on the container for delete operations. The lease Duration must be between 15 to 60 seconds, or infinite (-1). +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. +func (clc *ContainerLeaseClient) AcquireLease(ctx context.Context, options *ContainerAcquireLeaseOptions) (ContainerAcquireLeaseResponse, error) { + containerAcquireLeaseOptions, modifiedAccessConditions := options.format() + containerAcquireLeaseOptions.ProposedLeaseID = clc.leaseID + + resp, err := clc.client.AcquireLease(ctx, &containerAcquireLeaseOptions, modifiedAccessConditions) + if err == nil && resp.LeaseID != nil { + clc.leaseID = resp.LeaseID + } + return toContainerAcquireLeaseResponse(resp), handleError(err) +} + +// BreakLease breaks the container's previously-acquired lease (if it exists). +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. +func (clc *ContainerLeaseClient) BreakLease(ctx context.Context, options *ContainerBreakLeaseOptions) (ContainerBreakLeaseResponse, error) { + containerBreakLeaseOptions, modifiedAccessConditions := options.format() + resp, err := clc.client.BreakLease(ctx, containerBreakLeaseOptions, modifiedAccessConditions) + return toContainerBreakLeaseResponse(resp), handleError(err) +} + +// ChangeLease changes the container's lease ID. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. 
+func (clc *ContainerLeaseClient) ChangeLease(ctx context.Context, options *ContainerChangeLeaseOptions) (ContainerChangeLeaseResponse, error) { + if clc.leaseID == nil { + return ContainerChangeLeaseResponse{}, errors.New("leaseID cannot be nil") + } + + proposedLeaseID, changeLeaseOptions, modifiedAccessConditions, err := options.format() + if err != nil { + return ContainerChangeLeaseResponse{}, err + } + + resp, err := clc.client.ChangeLease(ctx, *clc.leaseID, *proposedLeaseID, changeLeaseOptions, modifiedAccessConditions) + if err == nil && resp.LeaseID != nil { + clc.leaseID = resp.LeaseID + } + return toContainerChangeLeaseResponse(resp), handleError(err) +} + +// ReleaseLease releases the container's previously-acquired lease. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. +func (clc *ContainerLeaseClient) ReleaseLease(ctx context.Context, options *ContainerReleaseLeaseOptions) (ContainerReleaseLeaseResponse, error) { + if clc.leaseID == nil { + return ContainerReleaseLeaseResponse{}, errors.New("leaseID cannot be nil") + } + containerReleaseLeaseOptions, modifiedAccessConditions := options.format() + resp, err := clc.client.ReleaseLease(ctx, *clc.leaseID, containerReleaseLeaseOptions, modifiedAccessConditions) + + return toContainerReleaseLeaseResponse(resp), handleError(err) +} + +// RenewLease renews the container's previously-acquired lease. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/lease-container. +func (clc *ContainerLeaseClient) RenewLease(ctx context.Context, options *ContainerRenewLeaseOptions) (ContainerRenewLeaseResponse, error) { + if clc.leaseID == nil { + return ContainerRenewLeaseResponse{}, errors.New("leaseID cannot be nil") + } + renewLeaseBlobOptions, modifiedAccessConditions := options.format() + resp, err := clc.client.RenewLease(ctx, *clc.leaseID, renewLeaseBlobOptions, modifiedAccessConditions) + if err == nil && resp.LeaseID != nil { + clc.leaseID = resp.LeaseID + } + return toContainerRenewLeaseResponse(resp), handleError(err) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_page_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_page_blob_client.go new file mode 100644 index 000000000..507993b9e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_page_blob_client.go @@ -0,0 +1,261 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "io" + "net/url" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +// PageBlobClient represents a client to an Azure Storage page blob; +type PageBlobClient struct { + BlobClient + client *pageBlobClient +} + +// NewPageBlobClient creates a ServiceClient object using the specified URL, Azure AD credential, and options. 
+// Example of serviceURL: https://.blob.core.windows.net +func NewPageBlobClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*PageBlobClient, error) { + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil) + conOptions := getConnectionOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + conn := newConnection(blobURL, conOptions) + + return &PageBlobClient{ + client: newPageBlobClient(conn.Endpoint(), conn.Pipeline()), + BlobClient: BlobClient{ + client: newBlobClient(conn.Endpoint(), conn.Pipeline()), + }, + }, nil +} + +// NewPageBlobClientWithNoCredential creates a ServiceClient object using the specified URL and options. +// Example of serviceURL: https://.blob.core.windows.net? +func NewPageBlobClientWithNoCredential(blobURL string, options *ClientOptions) (*PageBlobClient, error) { + conOptions := getConnectionOptions(options) + conn := newConnection(blobURL, conOptions) + + return &PageBlobClient{ + client: newPageBlobClient(conn.Endpoint(), conn.Pipeline()), + BlobClient: BlobClient{ + client: newBlobClient(conn.Endpoint(), conn.Pipeline()), + }, + }, nil +} + +// NewPageBlobClientWithSharedKey creates a ServiceClient object using the specified URL, shared key, and options. +// Example of serviceURL: https://.blob.core.windows.net +func NewPageBlobClientWithSharedKey(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*PageBlobClient, error) { + authPolicy := newSharedKeyCredPolicy(cred) + conOptions := getConnectionOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + conn := newConnection(blobURL, conOptions) + + return &PageBlobClient{ + client: newPageBlobClient(conn.Endpoint(), conn.Pipeline()), + BlobClient: BlobClient{ + client: newBlobClient(conn.Endpoint(), conn.Pipeline()), + sharedKey: cred, + }, + }, nil +} + +// WithSnapshot creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the snapshot returning a URL to the base blob. +func (pb *PageBlobClient) WithSnapshot(snapshot string) (*PageBlobClient, error) { + p, err := NewBlobURLParts(pb.URL()) + if err != nil { + return nil, err + } + p.Snapshot = snapshot + + endpoint := p.URL() + pipeline := pb.client.pl + return &PageBlobClient{ + client: newPageBlobClient(endpoint, pipeline), + BlobClient: BlobClient{ + client: newBlobClient(endpoint, pipeline), + sharedKey: pb.sharedKey, + }, + }, nil +} + +// WithVersionID creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp. +// Pass "" to remove the version returning a URL to the base blob. +func (pb *PageBlobClient) WithVersionID(versionID string) (*PageBlobClient, error) { + p, err := NewBlobURLParts(pb.URL()) + if err != nil { + return nil, err + } + + p.VersionID = versionID + endpoint := p.URL() + + pipeline := pb.client.pl + return &PageBlobClient{ + client: newPageBlobClient(endpoint, pipeline), + BlobClient: BlobClient{ + client: newBlobClient(endpoint, pipeline), + sharedKey: pb.sharedKey, + }, + }, nil +} + +// Create creates a page blob of the specified length. Call PutPage to upload data to a page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-blob. 
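+//
+// Illustrative sketch, not part of the upstream SDK: creating a page blob whose size is a multiple of
+// 512 bytes. pbClient and ctx are assumed to exist; nil options select the defaults, and data is then
+// written with UploadPages using 512-byte-aligned ranges.
+//
+//	const pageSize = 512
+//	_, err := pbClient.Create(ctx, 8*pageSize, nil)
+//	if err != nil {
+//		return err
+//	}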
+func (pb *PageBlobClient) Create(ctx context.Context, size int64, o *PageBlobCreateOptions) (PageBlobCreateResponse, error) { + createOptions, HTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := o.format() + + resp, err := pb.client.Create(ctx, 0, size, createOptions, HTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + + return toPageBlobCreateResponse(resp), handleError(err) +} + +// UploadPages writes 1 or more pages to the page blob. The start offset and the stream size must be a multiple of 512 bytes. +// This method panics if the stream is not at position 0. +// Note that the http client closes the body stream after the request is sent to the service. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page. +func (pb *PageBlobClient) UploadPages(ctx context.Context, body io.ReadSeekCloser, options *PageBlobUploadPagesOptions) (PageBlobUploadPagesResponse, error) { + count, err := validateSeekableStreamAt0AndGetCount(body) + + if err != nil { + return PageBlobUploadPagesResponse{}, err + } + + uploadPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format() + + resp, err := pb.client.UploadPages(ctx, count, body, uploadPagesOptions, leaseAccessConditions, + cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) + + return toPageBlobUploadPagesResponse(resp), handleError(err) +} + +// UploadPagesFromURL copies 1 or more pages from a source URL to the page blob. +// The sourceOffset specifies the start offset of source data to copy from. +// The destOffset specifies the start offset of data in page blob will be written to. +// The count must be a multiple of 512 bytes. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page-from-url. +func (pb *PageBlobClient) UploadPagesFromURL(ctx context.Context, source string, sourceOffset, destOffset, count int64, + options *PageBlobUploadPagesFromURLOptions) (PageBlobUploadPagesFromURLResponse, error) { + + uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions := options.format() + + resp, err := pb.client.UploadPagesFromURL(ctx, source, rangeToString(sourceOffset, count), 0, + rangeToString(destOffset, count), uploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, + sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + + return toPageBlobUploadPagesFromURLResponse(resp), handleError(err) +} + +// ClearPages frees the specified pages from the page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/put-page. +func (pb *PageBlobClient) ClearPages(ctx context.Context, pageRange HttpRange, options *PageBlobClearPagesOptions) (PageBlobClearPagesResponse, error) { + clearOptions := &pageBlobClientClearPagesOptions{ + Range: pageRange.format(), + } + + leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions := options.format() + + resp, err := pb.client.ClearPages(ctx, 0, clearOptions, leaseAccessConditions, cpkInfo, + cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) + + return toPageBlobClearPagesResponse(resp), handleError(err) +} + +// GetPageRanges returns the list of valid page ranges for a page blob or snapshot of a page blob. 
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. +func (pb *PageBlobClient) GetPageRanges(options *PageBlobGetPageRangesOptions) *PageBlobGetPageRangesPager { + getPageRangesOptions, leaseAccessConditions, modifiedAccessConditions := options.format() + + pageBlobGetPageRangesPager := pb.client.GetPageRanges(getPageRangesOptions, leaseAccessConditions, modifiedAccessConditions) + + // Fixing Advancer + pageBlobGetPageRangesPager.advancer = func(ctx context.Context, response pageBlobClientGetPageRangesResponse) (*policy.Request, error) { + getPageRangesOptions.Marker = response.NextMarker + req, err := pb.client.getPageRangesCreateRequest(ctx, getPageRangesOptions, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return nil, handleError(err) + } + queryValues, err := url.ParseQuery(req.Raw().URL.RawQuery) + if err != nil { + return nil, handleError(err) + } + req.Raw().URL.RawQuery = queryValues.Encode() + return req, nil + } + + return toPageBlobGetPageRangesPager(pageBlobGetPageRangesPager) +} + +// GetPageRangesDiff gets the collection of page ranges that differ between a specified snapshot and this page blob. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/get-page-ranges. +func (pb *PageBlobClient) GetPageRangesDiff(options *PageBlobGetPageRangesDiffOptions) *PageBlobGetPageRangesDiffPager { + getPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions := options.format() + + getPageRangesDiffPager := pb.client.GetPageRangesDiff(getPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions) + + // Fixing Advancer + getPageRangesDiffPager.advancer = func(ctx context.Context, response pageBlobClientGetPageRangesDiffResponse) (*policy.Request, error) { + getPageRangesDiffOptions.Marker = response.NextMarker + req, err := pb.client.getPageRangesDiffCreateRequest(ctx, getPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return nil, handleError(err) + } + queryValues, err := url.ParseQuery(req.Raw().URL.RawQuery) + if err != nil { + return nil, handleError(err) + } + req.Raw().URL.RawQuery = queryValues.Encode() + return req, nil + } + + return toPageBlobGetPageRangesDiffPager(getPageRangesDiffPager) +} + +// Resize resizes the page blob to the specified size (which must be a multiple of 512). +// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties. +func (pb *PageBlobClient) Resize(ctx context.Context, size int64, options *PageBlobResizeOptions) (PageBlobResizeResponse, error) { + resizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions := options.format() + + resp, err := pb.client.Resize(ctx, size, resizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + + return toPageBlobResizeResponse(resp), handleError(err) +} + +// UpdateSequenceNumber sets the page blob's sequence number. +func (pb *PageBlobClient) UpdateSequenceNumber(ctx context.Context, options *PageBlobUpdateSequenceNumberOptions) (PageBlobUpdateSequenceNumberResponse, error) { + actionType, updateOptions, lac, mac := options.format() + resp, err := pb.client.UpdateSequenceNumber(ctx, *actionType, updateOptions, lac, mac) + + return toPageBlobUpdateSequenceNumberResponse(resp), handleError(err) +} + +// StartCopyIncremental begins an operation to start an incremental copy from one page blob's snapshot to this page blob. 
+// The snapshot is copied such that only the differential changes between the previously copied snapshot are transferred to the destination.
+// The copied snapshots are complete copies of the original snapshot and can be read or copied from as usual.
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/incremental-copy-blob and
+// https://docs.microsoft.com/en-us/azure/virtual-machines/windows/incremental-snapshots.
+func (pb *PageBlobClient) StartCopyIncremental(ctx context.Context, copySource string, prevSnapshot string, options *PageBlobCopyIncrementalOptions) (PageBlobCopyIncrementalResponse, error) {
+	copySourceURL, err := url.Parse(copySource)
+	if err != nil {
+		return PageBlobCopyIncrementalResponse{}, err
+	}
+
+	queryParams := copySourceURL.Query()
+	queryParams.Set("snapshot", prevSnapshot)
+	copySourceURL.RawQuery = queryParams.Encode()
+
+	pageBlobCopyIncrementalOptions, modifiedAccessConditions := options.format()
+	resp, err := pb.client.CopyIncremental(ctx, copySourceURL.String(), pageBlobCopyIncrementalOptions, modifiedAccessConditions)
+
+	return toPageBlobCopyIncrementalResponse(resp), handleError(err)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_parsing_urls.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_parsing_urls.go
new file mode 100644
index 000000000..062587604
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_parsing_urls.go
@@ -0,0 +1,184 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+	"net"
+	"net/url"
+	"strings"
+)
+
+const (
+	snapshot           = "snapshot"
+	versionId          = "versionid"
+	SnapshotTimeFormat = "2006-01-02T15:04:05.0000000Z07:00"
+)
+
+// BlobURLParts object represents the components that make up an Azure Storage Container/Blob URL. You parse an
+// existing URL into its parts by calling NewBlobURLParts(). You construct a URL from parts by calling URL().
+// NOTE: Changing any SAS-related field requires computing a new SAS signature.
+type BlobURLParts struct {
+	Scheme              string // Ex: "https://"
+	Host                string // Ex: "account.blob.core.windows.net", "10.132.141.33", "10.132.141.33:80"
+	IPEndpointStyleInfo IPEndpointStyleInfo
+	ContainerName       string // "" if no container
+	BlobName            string // "" if no blob
+	Snapshot            string // "" if not a snapshot
+	SAS                 SASQueryParameters
+	UnparsedParams      string
+	VersionID           string // "" if not versioning enabled
+}
+
+// IPEndpointStyleInfo is used for IP endpoint style URLs when working with the Azure storage emulator.
+// Ex: "https://10.132.141.33/accountname/containername"
+type IPEndpointStyleInfo struct {
+	AccountName string // "" if not using IP endpoint style
+}
+
+// isIPEndpointStyle checks if the URL's host is an IP address; in this case the storage account endpoint is composed as:
+// http(s)://IP(:port)/storageaccount/container/...
+// As with url's Host property, host may be either host or host:port
+func isIPEndpointStyle(host string) bool {
+	if host == "" {
+		return false
+	}
+	if h, _, err := net.SplitHostPort(host); err == nil {
+		host = h
+	}
+	// For IPv6, there could be a case where SplitHostPort fails because it cannot find a port.
+	// In this case, eliminate the '[' and ']' in the URL.
+ // For details about IPv6 URL, please refer to https://tools.ietf.org/html/rfc2732 + if host[0] == '[' && host[len(host)-1] == ']' { + host = host[1 : len(host)-1] + } + return net.ParseIP(host) != nil +} + +// NewBlobURLParts parses a URL initializing BlobURLParts' fields including any SAS-related & snapshot query parameters. Any other +// query parameters remain in the UnparsedParams field. This method overwrites all fields in the BlobURLParts object. +func NewBlobURLParts(u string) (BlobURLParts, error) { + uri, err := url.Parse(u) + if err != nil { + return BlobURLParts{}, err + } + + up := BlobURLParts{ + Scheme: uri.Scheme, + Host: uri.Host, + } + + // Find the container & blob names (if any) + if uri.Path != "" { + path := uri.Path + if path[0] == '/' { + path = path[1:] // If path starts with a slash, remove it + } + if isIPEndpointStyle(up.Host) { + if accountEndIndex := strings.Index(path, "/"); accountEndIndex == -1 { // Slash not found; path has account name & no container name or blob + up.IPEndpointStyleInfo.AccountName = path + path = "" // No ContainerName present in the URL so path should be empty + } else { + up.IPEndpointStyleInfo.AccountName = path[:accountEndIndex] // The account name is the part between the slashes + path = path[accountEndIndex+1:] // path refers to portion after the account name now (container & blob names) + } + } + + containerEndIndex := strings.Index(path, "/") // Find the next slash (if it exists) + if containerEndIndex == -1 { // Slash not found; path has container name & no blob name + up.ContainerName = path + } else { + up.ContainerName = path[:containerEndIndex] // The container name is the part between the slashes + up.BlobName = path[containerEndIndex+1:] // The blob name is after the container slash + } + } + + // Convert the query parameters to a case-sensitive map & trim whitespace + paramsMap := uri.Query() + + up.Snapshot = "" // Assume no snapshot + if snapshotStr, ok := caseInsensitiveValues(paramsMap).Get(snapshot); ok { + up.Snapshot = snapshotStr[0] + // If we recognized the query parameter, remove it from the map + delete(paramsMap, snapshot) + } + + up.VersionID = "" // Assume no versionID + if versionIDs, ok := caseInsensitiveValues(paramsMap).Get(versionId); ok { + up.VersionID = versionIDs[0] + // If we recognized the query parameter, remove it from the map + delete(paramsMap, versionId) // delete "versionid" from paramsMap + delete(paramsMap, "versionId") // delete "versionId" from paramsMap + } + + up.SAS = newSASQueryParameters(paramsMap, true) + up.UnparsedParams = paramsMap.Encode() + return up, nil +} + +type caseInsensitiveValues url.Values // map[string][]string +func (values caseInsensitiveValues) Get(key string) ([]string, bool) { + key = strings.ToLower(key) + for k, v := range values { + if strings.ToLower(k) == key { + return v, true + } + } + return []string{}, false +} + +// URL returns a URL object whose fields are initialized from the BlobURLParts fields. The URL's RawQuery +// field contains the SAS, snapshot, and unparsed query parameters. 
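+//
+// Example (an illustrative sketch; the account, container and blob names below are placeholders):
+//
+//	parts, err := NewBlobURLParts("https://myaccount.blob.core.windows.net/mycontainer/dir/blob.txt?versionid=abc")
+//	if err != nil {
+//		// handle the parse error
+//	}
+//	_ = parts.ContainerName // "mycontainer"
+//	_ = parts.BlobName      // "dir/blob.txt"
+//	_ = parts.VersionID     // "abc"
+//	rebuilt := parts.URL()  // reassembles the URL, re-appending version, snapshot and SAS query parameters
+//	_ = rebuilt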
+func (up BlobURLParts) URL() string { + path := "" + if isIPEndpointStyle(up.Host) && up.IPEndpointStyleInfo.AccountName != "" { + path += "/" + up.IPEndpointStyleInfo.AccountName + } + // Concatenate container & blob names (if they exist) + if up.ContainerName != "" { + path += "/" + up.ContainerName + if up.BlobName != "" { + path += "/" + up.BlobName + } + } + + rawQuery := up.UnparsedParams + + //If no snapshot is initially provided, fill it in from the SAS query properties to help the user + if up.Snapshot == "" && !up.SAS.snapshotTime.IsZero() { + up.Snapshot = up.SAS.snapshotTime.Format(SnapshotTimeFormat) + } + + // Concatenate blob version id query parameter (if it exists) + if up.VersionID != "" { + if len(rawQuery) > 0 { + rawQuery += "&" + } + rawQuery += versionId + "=" + up.VersionID + } + + // Concatenate blob snapshot query parameter (if it exists) + if up.Snapshot != "" { + if len(rawQuery) > 0 { + rawQuery += "&" + } + rawQuery += snapshot + "=" + up.Snapshot + } + sas := up.SAS.Encode() + if sas != "" { + if len(rawQuery) > 0 { + rawQuery += "&" + } + rawQuery += sas + } + u := url.URL{ + Scheme: up.Scheme, + Host: up.Host, + Path: path, + RawQuery: rawQuery, + } + return u.String() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_error.go new file mode 100644 index 000000000..3f9878439 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_error.go @@ -0,0 +1,17 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import "net/http" + +// ResponseError is a wrapper of error passed from service +type ResponseError interface { + Error() string + Unwrap() error + RawResponse() *http.Response + NonRetriable() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_helpers.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_helpers.go new file mode 100644 index 000000000..dda993d1c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_response_helpers.go @@ -0,0 +1,35 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +// GetHTTPHeaders returns the user-modifiable properties for this blob. +func (bgpr BlobGetPropertiesResponse) GetHTTPHeaders() BlobHTTPHeaders { + return BlobHTTPHeaders{ + BlobContentType: bgpr.ContentType, + BlobContentEncoding: bgpr.ContentEncoding, + BlobContentLanguage: bgpr.ContentLanguage, + BlobContentDisposition: bgpr.ContentDisposition, + BlobCacheControl: bgpr.CacheControl, + BlobContentMD5: bgpr.ContentMD5, + } +} + +/////////////////////////////////////////////////////////////////////////////// + +// GetHTTPHeaders returns the user-modifiable properties for this blob. 
+func (r BlobDownloadResponse) GetHTTPHeaders() BlobHTTPHeaders { + return BlobHTTPHeaders{ + BlobContentType: r.ContentType, + BlobContentEncoding: r.ContentEncoding, + BlobContentLanguage: r.ContentLanguage, + BlobContentDisposition: r.ContentDisposition, + BlobCacheControl: r.CacheControl, + BlobContentMD5: r.ContentMD5, + } +} + +/////////////////////////////////////////////////////////////////////////////// diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_retry_reader.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_retry_reader.go new file mode 100644 index 000000000..3179138f1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_retry_reader.go @@ -0,0 +1,194 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "context" + "io" + "net" + "net/http" + "strings" + "sync" +) + +const CountToEnd = 0 + +// HTTPGetter is a function type that refers to a method that performs an HTTP GET operation. +type HTTPGetter func(ctx context.Context, i HTTPGetterInfo) (*http.Response, error) + +// HTTPGetterInfo is passed to an HTTPGetter function passing it parameters +// that should be used to make an HTTP GET request. +type HTTPGetterInfo struct { + // Offset specifies the start offset that should be used when + // creating the HTTP GET request's Range header + Offset int64 + + // Count specifies the count of bytes that should be used to calculate + // the end offset when creating the HTTP GET request's Range header + Count int64 + + // ETag specifies the resource's etag that should be used when creating + // the HTTP GET request's If-Match header + ETag string +} + +// FailedReadNotifier is a function type that represents the notification function called when a read fails +type FailedReadNotifier func(failureCount int, lastError error, offset int64, count int64, willRetry bool) + +// RetryReaderOptions contains properties which can help to decide when to do retry. +type RetryReaderOptions struct { + // MaxRetryRequests specifies the maximum number of HTTP GET requests that will be made + // while reading from a RetryReader. A value of zero means that no additional HTTP + // GET requests will be made. + MaxRetryRequests int + doInjectError bool + doInjectErrorRound int + injectedError error + + // NotifyFailedRead is called, if non-nil, after any failure to read. Expected usage is diagnostic logging. + NotifyFailedRead FailedReadNotifier + + // TreatEarlyCloseAsError can be set to true to prevent retries after "read on closed response body". By default, + // retryReader has the following special behaviour: closing the response body before it is all read is treated as a + // retryable error. This is to allow callers to force a retry by closing the body from another goroutine (e.g. if the = + // read is too slow, caller may want to force a retry in the hope that the retry will be quicker). If + // TreatEarlyCloseAsError is true, then retryReader's special behaviour is suppressed, and "read on closed body" is instead + // treated as a fatal (non-retryable) error. + // Note that setting TreatEarlyCloseAsError only guarantees that Closing will produce a fatal error if the Close happens + // from the same "thread" (goroutine) as Read. Concurrent Close calls from other goroutines may instead produce network errors + // which will be retried. 
+ TreatEarlyCloseAsError bool + + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo +} + +// retryReader implements io.ReaderCloser methods. +// retryReader tries to read from response, and if there is retriable network error +// returned during reading, it will retry according to retry reader option through executing +// user defined action with provided data to get a new response, and continue the overall reading process +// through reading from the new response. +type retryReader struct { + ctx context.Context + info HTTPGetterInfo + countWasBounded bool + o RetryReaderOptions + getter HTTPGetter + + // we support Close-ing during Reads (from other goroutines), so we protect the shared state, which is response + responseMu *sync.Mutex + response *http.Response +} + +// NewRetryReader creates a retry reader. +func NewRetryReader(ctx context.Context, initialResponse *http.Response, + info HTTPGetterInfo, o RetryReaderOptions, getter HTTPGetter) io.ReadCloser { + return &retryReader{ + ctx: ctx, + getter: getter, + info: info, + countWasBounded: info.Count != CountToEnd, + response: initialResponse, + responseMu: &sync.Mutex{}, + o: o} +} + +func (s *retryReader) setResponse(r *http.Response) { + s.responseMu.Lock() + defer s.responseMu.Unlock() + s.response = r +} + +func (s *retryReader) Read(p []byte) (n int, err error) { + for try := 0; ; try++ { + //fmt.Println(try) // Comment out for debugging. + if s.countWasBounded && s.info.Count == CountToEnd { + // User specified an original count and the remaining bytes are 0, return 0, EOF + return 0, io.EOF + } + + s.responseMu.Lock() + resp := s.response + s.responseMu.Unlock() + if resp == nil { // We don't have a response stream to read from, try to get one. + newResponse, err := s.getter(s.ctx, s.info) + if err != nil { + return 0, err + } + // Successful GET; this is the network stream we'll read from. + s.setResponse(newResponse) + resp = newResponse + } + n, err := resp.Body.Read(p) // Read from the stream (this will return non-nil err if forceRetry is called, from another goroutine, while it is running) + + // Injection mechanism for testing. + if s.o.doInjectError && try == s.o.doInjectErrorRound { + if s.o.injectedError != nil { + err = s.o.injectedError + } else { + err = &net.DNSError{IsTemporary: true} + } + } + + // We successfully read data or end EOF. + if err == nil || err == io.EOF { + s.info.Offset += int64(n) // Increments the start offset in case we need to make a new HTTP request in the future + if s.info.Count != CountToEnd { + s.info.Count -= int64(n) // Decrement the count in case we need to make a new HTTP request in the future + } + return n, err // Return the return to the caller + } + _ = s.Close() + + s.setResponse(nil) // Our stream is no longer good + + // Check the retry count and error code, and decide whether to retry. + retriesExhausted := try >= s.o.MaxRetryRequests + _, isNetError := err.(net.Error) + isUnexpectedEOF := err == io.ErrUnexpectedEOF + willRetry := (isNetError || isUnexpectedEOF || s.wasRetryableEarlyClose(err)) && !retriesExhausted + + // Notify, for logging purposes, of any failures + if s.o.NotifyFailedRead != nil { + failureCount := try + 1 // because try is zero-based + s.o.NotifyFailedRead(failureCount, err, s.info.Offset, s.info.Count, willRetry) + } + + if willRetry { + continue + // Loop around and try to get and read from new stream. 
+ } + return n, err // Not retryable, or retries exhausted, so just return + } +} + +// By default, we allow early Closing, from another concurrent goroutine, to be used to force a retry +// Is this safe, to close early from another goroutine? Early close ultimately ends up calling +// net.Conn.Close, and that is documented as "Any blocked Read or Write operations will be unblocked and return errors" +// which is exactly the behaviour we want. +// NOTE: that if caller has forced an early Close from a separate goroutine (separate from the Read) +// then there are two different types of error that may happen - either the one one we check for here, +// or a net.Error (due to closure of connection). Which one happens depends on timing. We only need this routine +// to check for one, since the other is a net.Error, which our main Read retry loop is already handing. +func (s *retryReader) wasRetryableEarlyClose(err error) bool { + if s.o.TreatEarlyCloseAsError { + return false // user wants all early closes to be errors, and so not retryable + } + // unfortunately, http.errReadOnClosedResBody is private, so the best we can do here is to check for its text + return strings.HasSuffix(err.Error(), ReadOnClosedBodyMessage) +} + +const ReadOnClosedBodyMessage = "read on closed response body" + +func (s *retryReader) Close() error { + s.responseMu.Lock() + defer s.responseMu.Unlock() + if s.response != nil && s.response.Body != nil { + return s.response.Body.Close() + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_account.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_account.go new file mode 100644 index 000000000..b4104def5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_account.go @@ -0,0 +1,243 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "bytes" + "errors" + "fmt" + "strings" + "time" +) + +// AccountSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas +type AccountSASSignatureValues struct { + Version string `param:"sv"` // If not specified, this defaults to SASVersion + Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants + StartTime time.Time `param:"st"` // Not specified if IsZero + ExpiryTime time.Time `param:"se"` // Not specified if IsZero + Permissions string `param:"sp"` // Create by initializing a AccountSASPermissions and then call String() + IPRange IPRange `param:"sip"` + Services string `param:"ss"` // Create by initializing AccountSASServices and then call String() + ResourceTypes string `param:"srt"` // Create by initializing AccountSASResourceTypes and then call String() +} + +// Sign uses an account's shared key credential to sign this signature values to produce +// the proper SAS query parameters. 
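+//
+// Example (an illustrative sketch; the account name and key are placeholders):
+//
+//	cred, _ := NewSharedKeyCredential("<account>", "<base64-account-key>")
+//	sasQueryParams, err := AccountSASSignatureValues{
+//		Protocol:      SASProtocolHTTPS,
+//		ExpiryTime:    time.Now().UTC().Add(48 * time.Hour),
+//		Permissions:   AccountSASPermissions{Read: true, List: true}.String(),
+//		Services:      AccountSASServices{Blob: true}.String(),
+//		ResourceTypes: AccountSASResourceTypes{Container: true, Object: true}.String(),
+//	}.Sign(cred)
+//	if err == nil {
+//		_ = sasQueryParams.Encode() // append as the query string of the account, container or blob URL
+//	}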
+func (v AccountSASSignatureValues) Sign(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/Constructing-an-Account-SAS + if v.ExpiryTime.IsZero() || v.Permissions == "" || v.ResourceTypes == "" || v.Services == "" { + return SASQueryParameters{}, errors.New("account SAS is missing at least one of these: ExpiryTime, Permissions, Service, or ResourceType") + } + if v.Version == "" { + v.Version = SASVersion + } + perms := &AccountSASPermissions{} + if err := perms.Parse(v.Permissions); err != nil { + return SASQueryParameters{}, err + } + v.Permissions = perms.String() + + startTime, expiryTime, _ := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, time.Time{}) + + stringToSign := strings.Join([]string{ + sharedKeyCredential.AccountName(), + v.Permissions, + v.Services, + v.ResourceTypes, + startTime, + expiryTime, + v.IPRange.String(), + string(v.Protocol), + v.Version, + ""}, // That right, the account SAS requires a terminating extra newline + "\n") + + signature, err := sharedKeyCredential.ComputeHMACSHA256(stringToSign) + if err != nil { + return SASQueryParameters{}, err + } + p := SASQueryParameters{ + // Common SAS parameters + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + + // Account-specific SAS parameters + services: v.Services, + resourceTypes: v.ResourceTypes, + + // Calculated SAS signature + signature: signature, + } + + return p, nil +} + +// AccountSASPermissions type simplifies creating the permissions string for an Azure Storage Account SAS. +// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Permissions field. +type AccountSASPermissions struct { + Read, Write, Delete, DeletePreviousVersion, List, Add, Create, Update, Process, Tag, FilterByTags bool +} + +// String produces the SAS permissions string for an Azure Storage account. +// Call this method to set AccountSASSignatureValues's Permissions field. +func (p AccountSASPermissions) String() string { + var buffer bytes.Buffer + if p.Read { + buffer.WriteRune('r') + } + if p.Write { + buffer.WriteRune('w') + } + if p.Delete { + buffer.WriteRune('d') + } + if p.DeletePreviousVersion { + buffer.WriteRune('x') + } + if p.List { + buffer.WriteRune('l') + } + if p.Add { + buffer.WriteRune('a') + } + if p.Create { + buffer.WriteRune('c') + } + if p.Update { + buffer.WriteRune('u') + } + if p.Process { + buffer.WriteRune('p') + } + if p.Tag { + buffer.WriteRune('t') + } + if p.FilterByTags { + buffer.WriteRune('f') + } + return buffer.String() +} + +// Parse initializes the AccountSASPermissions's fields from a string. +func (p *AccountSASPermissions) Parse(s string) error { + *p = AccountSASPermissions{} // Clear out the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'l': + p.List = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'u': + p.Update = true + case 'p': + p.Process = true + case 'x': + p.Process = true + case 't': + p.Tag = true + case 'f': + p.FilterByTags = true + default: + return fmt.Errorf("invalid permission character: '%v'", r) + } + } + return nil +} + +// AccountSASServices type simplifies creating the services string for an Azure Storage Account SAS. 
+// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's Services field. +type AccountSASServices struct { + Blob, Queue, File bool +} + +// String produces the SAS services string for an Azure Storage account. +// Call this method to set AccountSASSignatureValues's Services field. +func (s AccountSASServices) String() string { + var buffer bytes.Buffer + if s.Blob { + buffer.WriteRune('b') + } + if s.Queue { + buffer.WriteRune('q') + } + if s.File { + buffer.WriteRune('f') + } + return buffer.String() +} + +// Parse initializes the AccountSASServices' fields from a string. +func (s *AccountSASServices) Parse(str string) error { + *s = AccountSASServices{} // Clear out the flags + for _, r := range str { + switch r { + case 'b': + s.Blob = true + case 'q': + s.Queue = true + case 'f': + s.File = true + default: + return fmt.Errorf("invalid service character: '%v'", r) + } + } + return nil +} + +// AccountSASResourceTypes type simplifies creating the resource types string for an Azure Storage Account SAS. +// Initialize an instance of this type and then call its String method to set AccountSASSignatureValues's ResourceTypes field. +type AccountSASResourceTypes struct { + Service, Container, Object bool +} + +// String produces the SAS resource types string for an Azure Storage account. +// Call this method to set AccountSASSignatureValues's ResourceTypes field. +func (rt AccountSASResourceTypes) String() string { + var buffer bytes.Buffer + if rt.Service { + buffer.WriteRune('s') + } + if rt.Container { + buffer.WriteRune('c') + } + if rt.Object { + buffer.WriteRune('o') + } + return buffer.String() +} + +// Parse initializes the AccountSASResourceType's fields from a string. +func (rt *AccountSASResourceTypes) Parse(s string) error { + *rt = AccountSASResourceTypes{} // Clear out the flags + for _, r := range s { + switch r { + case 's': + rt.Service = true + case 'c': + rt.Container = true + case 'o': + rt.Object = true + default: + return fmt.Errorf("invalid resource type: '%v'", r) + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_query_params.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_query_params.go new file mode 100644 index 000000000..7efbec9b8 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_query_params.go @@ -0,0 +1,427 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "errors" + "net" + "net/url" + "strings" + "time" +) + +// SASProtocol indicates the http/https. +type SASProtocol string + +const ( + // SASProtocolHTTPS can be specified for a SAS protocol + SASProtocolHTTPS SASProtocol = "https" + + // SASProtocolHTTPSandHTTP can be specified for a SAS protocol + //SASProtocolHTTPSandHTTP SASProtocol = "https,http" +) + +// FormatTimesForSASSigning converts a time.Time to a snapshotTimeFormat string suitable for a +// SASField's StartTime or ExpiryTime fields. Returns "" if value.IsZero(). 
+func FormatTimesForSASSigning(startTime, expiryTime, snapshotTime time.Time) (string, string, string) {
+	ss := ""
+	if !startTime.IsZero() {
+		ss = formatSASTimeWithDefaultFormat(&startTime)
+	}
+	se := ""
+	if !expiryTime.IsZero() {
+		se = formatSASTimeWithDefaultFormat(&expiryTime)
+	}
+	sh := ""
+	if !snapshotTime.IsZero() {
+		sh = snapshotTime.Format(SnapshotTimeFormat)
+	}
+	return ss, se, sh
+}
+
+// SASTimeFormat represents the format of a SAS start or expiry time. Use it when formatting/parsing a time.Time.
+const SASTimeFormat = "2006-01-02T15:04:05Z" //"2017-07-27T00:00:00Z" // ISO 8601
+var SASTimeFormats = []string{"2006-01-02T15:04:05.0000000Z", SASTimeFormat, "2006-01-02T15:04Z", "2006-01-02"} // ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details.
+
+// formatSASTimeWithDefaultFormat formats time with ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ".
+func formatSASTimeWithDefaultFormat(t *time.Time) string {
+	return formatSASTime(t, SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// formatSASTime formats time with the given format; ISO 8601 in "yyyy-MM-ddTHH:mm:ssZ" is used by default.
+func formatSASTime(t *time.Time, format string) string {
+	if format != "" {
+		return t.Format(format)
+	}
+	return t.Format(SASTimeFormat) // By default, "yyyy-MM-ddTHH:mm:ssZ" is used
+}
+
+// parseSASTimeString tries to parse a SAS time string.
+func parseSASTimeString(val string) (t time.Time, timeFormat string, err error) {
+	for _, sasTimeFormat := range SASTimeFormats {
+		t, err = time.Parse(sasTimeFormat, val)
+		if err == nil {
+			timeFormat = sasTimeFormat
+			break
+		}
+	}
+
+	if err != nil {
+		err = errors.New("failed to parse time with ISO 8601 formats, please refer to https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas for more details")
+	}
+
+	return
+}
+
+// https://docs.microsoft.com/en-us/rest/api/storageservices/constructing-a-service-sas
+
+// SASQueryParameters object represents the components that make up an Azure Storage SAS' query parameters.
+// You parse a map of query parameters into its fields by calling NewSASQueryParameters(). You add the components
+// to a query parameter map by calling AddToValues().
+// NOTE: Changing any field requires computing a new SAS signature using a XxxSASSignatureValues type.
+// This type defines the components used by all Azure Storage resources (Containers, Blobs, Files, & Queues).
+type SASQueryParameters struct {
+	// All members are immutable or values so copies of this struct are goroutine-safe.
+ version string `param:"sv"` + services string `param:"ss"` + resourceTypes string `param:"srt"` + protocol SASProtocol `param:"spr"` + startTime time.Time `param:"st"` + expiryTime time.Time `param:"se"` + snapshotTime time.Time `param:"snapshot"` + ipRange IPRange `param:"sip"` + identifier string `param:"si"` + resource string `param:"sr"` + permissions string `param:"sp"` + signature string `param:"sig"` + cacheControl string `param:"rscc"` + contentDisposition string `param:"rscd"` + contentEncoding string `param:"rsce"` + contentLanguage string `param:"rscl"` + contentType string `param:"rsct"` + signedOid string `param:"skoid"` + signedTid string `param:"sktid"` + signedStart time.Time `param:"skt"` + signedService string `param:"sks"` + signedExpiry time.Time `param:"ske"` + signedVersion string `param:"skv"` + signedDirectoryDepth string `param:"sdd"` + preauthorizedAgentObjectId string `param:"saoid"` + agentObjectId string `param:"suoid"` + correlationId string `param:"scid"` + // private member used for startTime and expiryTime formatting. + stTimeFormat string + seTimeFormat string +} + +// PreauthorizedAgentObjectId returns preauthorizedAgentObjectId +func (p *SASQueryParameters) PreauthorizedAgentObjectId() string { + return p.preauthorizedAgentObjectId +} + +// AgentObjectId returns agentObjectId +func (p *SASQueryParameters) AgentObjectId() string { + return p.agentObjectId +} + +// SignedCorrelationId returns signedCorrelationId +func (p *SASQueryParameters) SignedCorrelationId() string { + return p.correlationId +} + +// SignedTid returns aignedTid +func (p *SASQueryParameters) SignedTid() string { + return p.signedTid +} + +// SignedStart returns signedStart +func (p *SASQueryParameters) SignedStart() time.Time { + return p.signedStart +} + +// SignedExpiry returns signedExpiry +func (p *SASQueryParameters) SignedExpiry() time.Time { + return p.signedExpiry +} + +// SignedService returns signedService +func (p *SASQueryParameters) SignedService() string { + return p.signedService +} + +// SignedVersion returns signedVersion +func (p *SASQueryParameters) SignedVersion() string { + return p.signedVersion +} + +// SnapshotTime returns snapshotTime +func (p *SASQueryParameters) SnapshotTime() time.Time { + return p.snapshotTime +} + +// Version returns version +func (p *SASQueryParameters) Version() string { + return p.version +} + +// Services returns services +func (p *SASQueryParameters) Services() string { + return p.services +} + +// ResourceTypes returns resourceTypes +func (p *SASQueryParameters) ResourceTypes() string { + return p.resourceTypes +} + +// Protocol returns protocol +func (p *SASQueryParameters) Protocol() SASProtocol { + return p.protocol +} + +// StartTime returns startTime +func (p *SASQueryParameters) StartTime() time.Time { + return p.startTime +} + +// ExpiryTime returns expiryTime +func (p *SASQueryParameters) ExpiryTime() time.Time { + return p.expiryTime +} + +// IPRange returns ipRange +func (p *SASQueryParameters) IPRange() IPRange { + return p.ipRange +} + +// Identifier returns identifier +func (p *SASQueryParameters) Identifier() string { + return p.identifier +} + +// Resource returns resource +func (p *SASQueryParameters) Resource() string { + return p.resource +} + +// Permissions returns permissions +func (p *SASQueryParameters) Permissions() string { + return p.permissions +} + +// Signature returns signature +func (p *SASQueryParameters) Signature() string { + return p.signature +} + +// CacheControl returns cacheControl +func (p 
*SASQueryParameters) CacheControl() string { + return p.cacheControl +} + +// ContentDisposition returns contentDisposition +func (p *SASQueryParameters) ContentDisposition() string { + return p.contentDisposition +} + +// ContentEncoding returns contentEncoding +func (p *SASQueryParameters) ContentEncoding() string { + return p.contentEncoding +} + +// ContentLanguage returns contentLanguage +func (p *SASQueryParameters) ContentLanguage() string { + return p.contentLanguage +} + +// ContentType returns sontentType +func (p *SASQueryParameters) ContentType() string { + return p.contentType +} + +// SignedDirectoryDepth returns signedDirectoryDepth +func (p *SASQueryParameters) SignedDirectoryDepth() string { + return p.signedDirectoryDepth +} + +// IPRange represents a SAS IP range's start IP and (optionally) end IP. +type IPRange struct { + Start net.IP // Not specified if length = 0 + End net.IP // Not specified if length = 0 +} + +// String returns a string representation of an IPRange. +func (ipr *IPRange) String() string { + if len(ipr.Start) == 0 { + return "" + } + start := ipr.Start.String() + if len(ipr.End) == 0 { + return start + } + return start + "-" + ipr.End.String() +} + +// NewSASQueryParameters creates and initializes a SASQueryParameters object based on the +// query parameter map's passed-in values. If deleteSASParametersFromValues is true, +// all SAS-related query parameters are removed from the passed-in map. If +// deleteSASParametersFromValues is false, the map passed-in map is unaltered. +func newSASQueryParameters(values url.Values, deleteSASParametersFromValues bool) SASQueryParameters { + p := SASQueryParameters{} + for k, v := range values { + val := v[0] + isSASKey := true + switch strings.ToLower(k) { + case "sv": + p.version = val + case "ss": + p.services = val + case "srt": + p.resourceTypes = val + case "spr": + p.protocol = SASProtocol(val) + case "snapshot": + p.snapshotTime, _ = time.Parse(SnapshotTimeFormat, val) + case "st": + p.startTime, p.stTimeFormat, _ = parseSASTimeString(val) + case "se": + p.expiryTime, p.seTimeFormat, _ = parseSASTimeString(val) + case "sip": + dashIndex := strings.Index(val, "-") + if dashIndex == -1 { + p.ipRange.Start = net.ParseIP(val) + } else { + p.ipRange.Start = net.ParseIP(val[:dashIndex]) + p.ipRange.End = net.ParseIP(val[dashIndex+1:]) + } + case "si": + p.identifier = val + case "sr": + p.resource = val + case "sp": + p.permissions = val + case "sig": + p.signature = val + case "rscc": + p.cacheControl = val + case "rscd": + p.contentDisposition = val + case "rsce": + p.contentEncoding = val + case "rscl": + p.contentLanguage = val + case "rsct": + p.contentType = val + case "skoid": + p.signedOid = val + case "sktid": + p.signedTid = val + case "skt": + p.signedStart, _ = time.Parse(SASTimeFormat, val) + case "ske": + p.signedExpiry, _ = time.Parse(SASTimeFormat, val) + case "sks": + p.signedService = val + case "skv": + p.signedVersion = val + case "sdd": + p.signedDirectoryDepth = val + case "saoid": + p.preauthorizedAgentObjectId = val + case "suoid": + p.agentObjectId = val + case "scid": + p.correlationId = val + default: + isSASKey = false // We didn't recognize the query parameter + } + if isSASKey && deleteSASParametersFromValues { + delete(values, k) + } + } + return p +} + +// AddToValues adds the SAS components to the specified query parameters map. 
+func (p *SASQueryParameters) addToValues(v url.Values) url.Values { + if p.version != "" { + v.Add("sv", p.version) + } + if p.services != "" { + v.Add("ss", p.services) + } + if p.resourceTypes != "" { + v.Add("srt", p.resourceTypes) + } + if p.protocol != "" { + v.Add("spr", string(p.protocol)) + } + if !p.startTime.IsZero() { + v.Add("st", formatSASTime(&(p.startTime), p.stTimeFormat)) + } + if !p.expiryTime.IsZero() { + v.Add("se", formatSASTime(&(p.expiryTime), p.seTimeFormat)) + } + if len(p.ipRange.Start) > 0 { + v.Add("sip", p.ipRange.String()) + } + if p.identifier != "" { + v.Add("si", p.identifier) + } + if p.resource != "" { + v.Add("sr", p.resource) + } + if p.permissions != "" { + v.Add("sp", p.permissions) + } + if p.signedOid != "" { + v.Add("skoid", p.signedOid) + v.Add("sktid", p.signedTid) + v.Add("skt", p.signedStart.Format(SASTimeFormat)) + v.Add("ske", p.signedExpiry.Format(SASTimeFormat)) + v.Add("sks", p.signedService) + v.Add("skv", p.signedVersion) + } + if p.signature != "" { + v.Add("sig", p.signature) + } + if p.cacheControl != "" { + v.Add("rscc", p.cacheControl) + } + if p.contentDisposition != "" { + v.Add("rscd", p.contentDisposition) + } + if p.contentEncoding != "" { + v.Add("rsce", p.contentEncoding) + } + if p.contentLanguage != "" { + v.Add("rscl", p.contentLanguage) + } + if p.contentType != "" { + v.Add("rsct", p.contentType) + } + if p.signedDirectoryDepth != "" { + v.Add("sdd", p.signedDirectoryDepth) + } + if p.preauthorizedAgentObjectId != "" { + v.Add("saoid", p.preauthorizedAgentObjectId) + } + if p.agentObjectId != "" { + v.Add("suoid", p.agentObjectId) + } + if p.correlationId != "" { + v.Add("scid", p.correlationId) + } + return v +} + +// Encode encodes the SAS query parameters into URL encoded form sorted by key. +func (p *SASQueryParameters) Encode() string { + v := url.Values{} + p.addToValues(v) + return v.Encode() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_service.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_service.go new file mode 100644 index 000000000..488baed8c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_sas_service.go @@ -0,0 +1,365 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "bytes" + "fmt" + "strings" + "time" +) + +// BlobSASSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage container or blob. 
+// For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-a-service-sas +type BlobSASSignatureValues struct { + Version string `param:"sv"` // If not specified, this defaults to SASVersion + Protocol SASProtocol `param:"spr"` // See the SASProtocol* constants + StartTime time.Time `param:"st"` // Not specified if IsZero + ExpiryTime time.Time `param:"se"` // Not specified if IsZero + SnapshotTime time.Time + Permissions string `param:"sp"` // Create by initializing a ContainerSASPermissions or BlobSASPermissions and then call String() + IPRange IPRange `param:"sip"` + Identifier string `param:"si"` + ContainerName string + BlobName string // Use "" to create a Container SAS + Directory string // Not nil for a directory SAS (ie sr=d) + CacheControl string // rscc + ContentDisposition string // rscd + ContentEncoding string // rsce + ContentLanguage string // rscl + ContentType string // rsct + BlobVersion string // sr=bv + PreauthorizedAgentObjectId string + AgentObjectId string + CorrelationId string +} + +func getDirectoryDepth(path string) string { + if path == "" { + return "" + } + return fmt.Sprint(strings.Count(path, "/") + 1) +} + +// NewSASQueryParameters uses an account's StorageAccountCredential to sign this signature values to produce +// the proper SAS query parameters. +// See: StorageAccountCredential. Compatible with both UserDelegationCredential and SharedKeyCredential +func (v BlobSASSignatureValues) NewSASQueryParameters(sharedKeyCredential *SharedKeyCredential) (SASQueryParameters, error) { + resource := "c" + if sharedKeyCredential == nil { + return SASQueryParameters{}, fmt.Errorf("cannot sign SAS query without Shared Key Credential") + } + + if !v.SnapshotTime.IsZero() { + resource = "bs" + //Make sure the permission characters are in the correct order + perms := &BlobSASPermissions{} + if err := perms.Parse(v.Permissions); err != nil { + return SASQueryParameters{}, err + } + v.Permissions = perms.String() + } else if v.BlobVersion != "" { + resource = "bv" + //Make sure the permission characters are in the correct order + perms := &BlobSASPermissions{} + if err := perms.Parse(v.Permissions); err != nil { + return SASQueryParameters{}, err + } + v.Permissions = perms.String() + } else if v.Directory != "" { + resource = "d" + v.BlobName = "" + perms := &BlobSASPermissions{} + if err := perms.Parse(v.Permissions); err != nil { + return SASQueryParameters{}, err + } + v.Permissions = perms.String() + } else if v.BlobName == "" { + // Make sure the permission characters are in the correct order + perms := &ContainerSASPermissions{} + if err := perms.Parse(v.Permissions); err != nil { + return SASQueryParameters{}, err + } + v.Permissions = perms.String() + } else { + resource = "b" + // Make sure the permission characters are in the correct order + perms := &BlobSASPermissions{} + if err := perms.Parse(v.Permissions); err != nil { + return SASQueryParameters{}, err + } + v.Permissions = perms.String() + } + if v.Version == "" { + v.Version = SASVersion + } + startTime, expiryTime, snapshotTime := FormatTimesForSASSigning(v.StartTime, v.ExpiryTime, v.SnapshotTime) + + signedIdentifier := v.Identifier + + //udk := sharedKeyCredential.getUDKParams() + // + //if udk != nil { + // udkStart, udkExpiry, _ := FormatTimesForSASSigning(udk.SignedStart, udk.SignedExpiry, time.Time{}) + // //I don't like this answer to combining the functions + // //But because signedIdentifier and the user delegation key strings share a place, this is an _OK_ 
way to do it. + // signedIdentifier = strings.Join([]string{ + // udk.SignedOid, + // udk.SignedTid, + // udkStart, + // udkExpiry, + // udk.SignedService, + // udk.SignedVersion, + // v.PreauthorizedAgentObjectId, + // v.AgentObjectId, + // v.CorrelationId, + // }, "\n") + //} + + // String to sign: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + stringToSign := strings.Join([]string{ + v.Permissions, + startTime, + expiryTime, + getCanonicalName(sharedKeyCredential.AccountName(), v.ContainerName, v.BlobName, v.Directory), + signedIdentifier, + v.IPRange.String(), + string(v.Protocol), + v.Version, + resource, + snapshotTime, // signed timestamp + v.CacheControl, // rscc + v.ContentDisposition, // rscd + v.ContentEncoding, // rsce + v.ContentLanguage, // rscl + v.ContentType}, // rsct + "\n") + + signature := "" + signature, err := sharedKeyCredential.ComputeHMACSHA256(stringToSign) + if err != nil { + return SASQueryParameters{}, err + } + + p := SASQueryParameters{ + // Common SAS parameters + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + + // Container/Blob-specific SAS parameters + resource: resource, + identifier: v.Identifier, + cacheControl: v.CacheControl, + contentDisposition: v.ContentDisposition, + contentEncoding: v.ContentEncoding, + contentLanguage: v.ContentLanguage, + contentType: v.ContentType, + snapshotTime: v.SnapshotTime, + signedDirectoryDepth: getDirectoryDepth(v.Directory), + preauthorizedAgentObjectId: v.PreauthorizedAgentObjectId, + agentObjectId: v.AgentObjectId, + correlationId: v.CorrelationId, + // Calculated SAS signature + signature: signature, + } + + ////User delegation SAS specific parameters + //if udk != nil { + // p.signedOid = udk.SignedOid + // p.signedTid = udk.SignedTid + // p.signedStart = udk.SignedStart + // p.signedExpiry = udk.SignedExpiry + // p.signedService = udk.SignedService + // p.signedVersion = udk.SignedVersion + //} + + return p, nil +} + +// getCanonicalName computes the canonical name for a container or blob resource for SAS signing. +func getCanonicalName(account string, containerName string, blobName string, directoryName string) string { + // Container: "/blob/account/containername" + // Blob: "/blob/account/containername/blobname" + elements := []string{"/blob/", account, "/", containerName} + if blobName != "" { + elements = append(elements, "/", strings.Replace(blobName, "\\", "/", -1)) + } else if directoryName != "" { + elements = append(elements, "/", directoryName) + } + return strings.Join(elements, "") +} + +// ContainerSASPermissions type simplifies creating the permissions string for an Azure Storage container SAS. +// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field. +// All permissions descriptions can be found here: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-directory-container-or-blob +type ContainerSASPermissions struct { + Read, Add, Create, Write, Delete, DeletePreviousVersion, List, Tag bool + Execute, ModifyOwnership, ModifyPermissions bool // Hierarchical Namespace only +} + +// String produces the SAS permissions string for an Azure Storage container. +// Call this method to set BlobSASSignatureValues's Permissions field. 
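+//
+// Example (an illustrative sketch; the credential, container name and expiry below are placeholders):
+//
+//	cred, _ := NewSharedKeyCredential("<account>", "<base64-account-key>")
+//	sasQueryParams, err := BlobSASSignatureValues{
+//		Protocol:      SASProtocolHTTPS,
+//		ExpiryTime:    time.Now().UTC().Add(24 * time.Hour),
+//		Permissions:   ContainerSASPermissions{Read: true, List: true}.String(),
+//		ContainerName: "mycontainer", // leave BlobName empty to get a container-level SAS
+//	}.NewSASQueryParameters(cred)
+//	if err == nil {
+//		_ = sasQueryParams.Encode()
+//	}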
+func (p ContainerSASPermissions) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + if p.DeletePreviousVersion { + b.WriteRune('x') + } + if p.List { + b.WriteRune('l') + } + if p.Tag { + b.WriteRune('t') + } + if p.Execute { + b.WriteRune('e') + } + if p.ModifyOwnership { + b.WriteRune('o') + } + if p.ModifyPermissions { + b.WriteRune('p') + } + return b.String() +} + +// Parse initializes the ContainerSASPermissions's fields from a string. +func (p *ContainerSASPermissions) Parse(s string) error { + *p = ContainerSASPermissions{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'x': + p.DeletePreviousVersion = true + case 'l': + p.List = true + case 't': + p.Tag = true + case 'e': + p.Execute = true + case 'o': + p.ModifyOwnership = true + case 'p': + p.ModifyPermissions = true + default: + return fmt.Errorf("invalid permission: '%v'", r) + } + } + return nil +} + +// BlobSASPermissions type simplifies creating the permissions string for an Azure Storage blob SAS. +// Initialize an instance of this type and then call its String method to set BlobSASSignatureValues's Permissions field. +type BlobSASPermissions struct { + Read, Add, Create, Write, Delete, DeletePreviousVersion, Tag, List, Move, Execute, Ownership, Permissions bool +} + +// String produces the SAS permissions string for an Azure Storage blob. +// Call this method to set BlobSASSignatureValues's Permissions field. +func (p BlobSASPermissions) String() string { + var b bytes.Buffer + if p.Read { + b.WriteRune('r') + } + if p.Add { + b.WriteRune('a') + } + if p.Create { + b.WriteRune('c') + } + if p.Write { + b.WriteRune('w') + } + if p.Delete { + b.WriteRune('d') + } + if p.DeletePreviousVersion { + b.WriteRune('x') + } + if p.Tag { + b.WriteRune('t') + } + if p.List { + b.WriteRune('l') + } + if p.Move { + b.WriteRune('m') + } + if p.Execute { + b.WriteRune('e') + } + if p.Ownership { + b.WriteRune('o') + } + if p.Permissions { + b.WriteRune('p') + } + return b.String() +} + +// Parse initializes the BlobSASPermissions's fields from a string. +func (p *BlobSASPermissions) Parse(s string) error { + *p = BlobSASPermissions{} // Clear the flags + for _, r := range s { + switch r { + case 'r': + p.Read = true + case 'a': + p.Add = true + case 'c': + p.Create = true + case 'w': + p.Write = true + case 'd': + p.Delete = true + case 'x': + p.DeletePreviousVersion = true + case 't': + p.Tag = true + case 'l': + p.List = true + case 'm': + p.Move = true + case 'e': + p.Execute = true + case 'o': + p.Ownership = true + case 'p': + p.Permissions = true + default: + return fmt.Errorf("invalid permission: '%v'", r) + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_service_client.go new file mode 100644 index 000000000..e75dd10b3 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_service_client.go @@ -0,0 +1,266 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azblob + +import ( + "context" + "errors" + "net/url" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +//nolint +const ( + // ContainerNameRoot is the special Azure Storage name used to identify a storage account's root container. + ContainerNameRoot = "$root" + + // ContainerNameLogs is the special Azure Storage name used to identify a storage account's logs container. + ContainerNameLogs = "$logs" +) + +// ServiceClient represents a URL to the Azure Blob Storage service allowing you to manipulate blob containers. +type ServiceClient struct { + client *serviceClient + sharedKey *SharedKeyCredential +} + +// URL returns the URL endpoint used by the ServiceClient object. +func (s ServiceClient) URL() string { + return s.client.endpoint +} + +// NewServiceClient creates a ServiceClient object using the specified URL, Azure AD credential, and options. +// Example of serviceURL: https://.blob.core.windows.net +func NewServiceClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*ServiceClient, error) { + authPolicy := runtime.NewBearerTokenPolicy(cred, []string{tokenScope}, nil) + conOptions := getConnectionOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + conn := newConnection(serviceURL, conOptions) + + return &ServiceClient{ + client: newServiceClient(conn.Endpoint(), conn.Pipeline()), + }, nil +} + +// NewServiceClientWithNoCredential creates a ServiceClient object using the specified URL and options. +// Example of serviceURL: https://.blob.core.windows.net? +func NewServiceClientWithNoCredential(serviceURL string, options *ClientOptions) (*ServiceClient, error) { + conOptions := getConnectionOptions(options) + conn := newConnection(serviceURL, conOptions) + + return &ServiceClient{ + client: newServiceClient(conn.Endpoint(), conn.Pipeline()), + }, nil +} + +// NewServiceClientWithSharedKey creates a ServiceClient object using the specified URL, shared key, and options. +// Example of serviceURL: https://.blob.core.windows.net +func NewServiceClientWithSharedKey(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*ServiceClient, error) { + authPolicy := newSharedKeyCredPolicy(cred) + conOptions := getConnectionOptions(options) + conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) + conn := newConnection(serviceURL, conOptions) + + return &ServiceClient{ + client: newServiceClient(conn.Endpoint(), conn.Pipeline()), + sharedKey: cred, + }, nil +} + +// NewServiceClientFromConnectionString creates a service client from the given connection string. +//nolint +func NewServiceClientFromConnectionString(connectionString string, options *ClientOptions) (*ServiceClient, error) { + endpoint, credential, err := parseConnectionString(connectionString) + if err != nil { + return nil, err + } + return NewServiceClientWithSharedKey(endpoint, credential, options) +} + +// NewContainerClient creates a new ContainerClient object by concatenating containerName to the end of +// ServiceClient's URL. The new ContainerClient uses the same request policy pipeline as the ServiceClient. +// To change the pipeline, create the ContainerClient and then call its WithPipeline method passing in the +// desired pipeline object. Or, call this package's NewContainerClient instead of calling this object's +// NewContainerClient method. 
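+//
+// Example (an illustrative sketch; the account name, key and container name are placeholders):
+//
+//	cred, _ := NewSharedKeyCredential("<account>", "<base64-account-key>")
+//	svc, _ := NewServiceClientWithSharedKey("https://<account>.blob.core.windows.net/", cred, nil)
+//	container, err := svc.NewContainerClient("mycontainer")
+//	if err == nil {
+//		_, _ = container.Create(context.TODO(), nil) // or svc.CreateContainer(context.TODO(), "mycontainer", nil)
+//	}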
+func (s *ServiceClient) NewContainerClient(containerName string) (*ContainerClient, error) { + containerURL := appendToURLPath(s.client.endpoint, containerName) + return &ContainerClient{ + client: newContainerClient(containerURL, s.client.pl), + sharedKey: s.sharedKey, + }, nil +} + +// CreateContainer is a lifecycle method to creates a new container under the specified account. +// If the container with the same name already exists, a ResourceExistsError will be raised. +// This method returns a client with which to interact with the newly created container. +func (s *ServiceClient) CreateContainer(ctx context.Context, containerName string, options *ContainerCreateOptions) (ContainerCreateResponse, error) { + containerClient, err := s.NewContainerClient(containerName) + if err != nil { + return ContainerCreateResponse{}, err + } + containerCreateResp, err := containerClient.Create(ctx, options) + return containerCreateResp, err +} + +// DeleteContainer is a lifecycle method that marks the specified container for deletion. +// The container and any blobs contained within it are later deleted during garbage collection. +// If the container is not found, a ResourceNotFoundError will be raised. +func (s *ServiceClient) DeleteContainer(ctx context.Context, containerName string, options *ContainerDeleteOptions) (ContainerDeleteResponse, error) { + containerClient, _ := s.NewContainerClient(containerName) + containerDeleteResp, err := containerClient.Delete(ctx, options) + return containerDeleteResp, err +} + +// appendToURLPath appends a string to the end of a URL's path (prefixing the string with a '/' if required) +func appendToURLPath(u string, name string) string { + // e.g. "https://ms.com/a/b/?k1=v1&k2=v2#f" + // When you call url.Parse() this is what you'll get: + // Scheme: "https" + // Opaque: "" + // User: nil + // Host: "ms.com" + // Path: "/a/b/" This should start with a / and it might or might not have a trailing slash + // RawPath: "" + // ForceQuery: false + // RawQuery: "k1=v1&k2=v2" + // Fragment: "f" + uri, _ := url.Parse(u) + + if len(uri.Path) == 0 || uri.Path[len(uri.Path)-1] != '/' { + uri.Path += "/" // Append "/" to end before appending name + } + uri.Path += name + return uri.String() +} + +// GetAccountInfo provides account level information +func (s *ServiceClient) GetAccountInfo(ctx context.Context, o *ServiceGetAccountInfoOptions) (ServiceGetAccountInfoResponse, error) { + getAccountInfoOptions := o.format() + resp, err := s.client.GetAccountInfo(ctx, getAccountInfoOptions) + return toServiceGetAccountInfoResponse(resp), handleError(err) +} + +// ListContainers operation returns a pager of the containers under the specified account. +// Use an empty Marker to start enumeration from the beginning. Container names are returned in lexicographic order. +// For more information, see https://docs.microsoft.com/rest/api/storageservices/list-containers2. +func (s *ServiceClient) ListContainers(o *ListContainersOptions) *ServiceListContainersSegmentPager { + listOptions := o.format() + pager := s.client.ListContainersSegment(listOptions) + //TODO: .Err()? 
+ //// override the generated advancer, which is incorrect + //if pager.Err() != nil { + // return pager + //} + + pager.advancer = func(ctx context.Context, response serviceClientListContainersSegmentResponse) (*policy.Request, error) { + if response.ListContainersSegmentResponse.NextMarker == nil { + return nil, handleError(errors.New("unexpected missing NextMarker")) + } + req, err := s.client.listContainersSegmentCreateRequest(ctx, listOptions) + if err != nil { + return nil, handleError(err) + } + queryValues, _ := url.ParseQuery(req.Raw().URL.RawQuery) + queryValues.Set("marker", *response.ListContainersSegmentResponse.NextMarker) + + req.Raw().URL.RawQuery = queryValues.Encode() + return req, nil + } + + return toServiceListContainersSegmentPager(*pager) +} + +// GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics +// and CORS (Cross-Origin Resource Sharing) rules. +func (s *ServiceClient) GetProperties(ctx context.Context, o *ServiceGetPropertiesOptions) (ServiceGetPropertiesResponse, error) { + getPropertiesOptions := o.format() + resp, err := s.client.GetProperties(ctx, getPropertiesOptions) + + return toServiceGetPropertiesResponse(resp), handleError(err) +} + +// SetProperties Sets the properties of a storage account's Blob service, including Azure Storage Analytics. +// If an element (e.g. analytics_logging) is left as None, the existing settings on the service for that functionality are preserved. +func (s *ServiceClient) SetProperties(ctx context.Context, o *ServiceSetPropertiesOptions) (ServiceSetPropertiesResponse, error) { + properties, setPropertiesOptions := o.format() + resp, err := s.client.SetProperties(ctx, properties, setPropertiesOptions) + + return toServiceSetPropertiesResponse(resp), handleError(err) +} + +// GetStatistics Retrieves statistics related to replication for the Blob service. +// It is only available when read-access geo-redundant replication is enabled for the storage account. +// With geo-redundant replication, Azure Storage maintains your data durable +// in two locations. In both locations, Azure Storage constantly maintains +// multiple healthy replicas of your data. The location where you read, +// create, update, or delete data is the primary storage account location. +// The primary location exists in the region you choose at the time you +// create an account via the Azure Management Azure classic portal, for +// example, North Central US. The location to which your data is replicated +// is the secondary location. The secondary location is automatically +// determined based on the location of the primary; it is in a second data +// center that resides in the same region as the primary location. Read-only +// access is available from the secondary location, if read-access geo-redundant +// replication is enabled for your storage account. +func (s *ServiceClient) GetStatistics(ctx context.Context, o *ServiceGetStatisticsOptions) (ServiceGetStatisticsResponse, error) { + getStatisticsOptions := o.format() + resp, err := s.client.GetStatistics(ctx, getStatisticsOptions) + + return toServiceGetStatisticsResponse(resp), handleError(err) +} + +// CanGetAccountSASToken checks if shared key in ServiceClient is nil +func (s *ServiceClient) CanGetAccountSASToken() bool { + return s.sharedKey != nil +} + +// GetSASURL is a convenience method for generating a SAS token for the currently pointed at account. 
+// It can only be used if the credential supplied during creation was a SharedKeyCredential. +// This validity can be checked with CanGetAccountSASToken(). +func (s *ServiceClient) GetSASURL(resources AccountSASResourceTypes, permissions AccountSASPermissions, start time.Time, expiry time.Time) (string, error) { + if s.sharedKey == nil { + return "", errors.New("SAS can only be signed with a SharedKeyCredential") + } + + qps, err := AccountSASSignatureValues{ + Version: SASVersion, + Protocol: SASProtocolHTTPS, + Permissions: permissions.String(), + Services: "b", + ResourceTypes: resources.String(), + StartTime: start.UTC(), + ExpiryTime: expiry.UTC(), + }.Sign(s.sharedKey) + if err != nil { + return "", err + } + + endpoint := s.URL() + if !strings.HasSuffix(endpoint, "/") { + endpoint += "/" + } + endpoint += "?" + qps.Encode() + + return endpoint, nil +} + +// FindBlobsByTags operation finds all blobs in the storage account whose tags match a given search expression. +// Filter blobs searches across all containers within a storage account but can be scoped within the expression to a single container. +// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags +// eg. "dog='germanshepherd' and penguin='emperorpenguin'" +// To specify a container, eg. "@container=’containerName’ and Name = ‘C’" +func (s *ServiceClient) FindBlobsByTags(ctx context.Context, o *ServiceFilterBlobsOptions) (ServiceFilterBlobsResponse, error) { + // TODO: Use pager here? Missing support from zz_generated_pagers.go + serviceFilterBlobsOptions := o.pointer() + resp, err := s.client.FilterBlobs(ctx, serviceFilterBlobsOptions) + return toServiceFilterBlobsResponse(resp), err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_shared_policy_shared_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_shared_policy_shared_key_credential.go new file mode 100644 index 000000000..60b1e5a76 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_shared_policy_shared_key_credential.go @@ -0,0 +1,197 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "fmt" + "net/http" + "net/url" + "sort" + "strings" + "sync/atomic" + "time" + + azlog "github.com/Azure/azure-sdk-for-go/sdk/azcore/log" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +// NewSharedKeyCredential creates an immutable SharedKeyCredential containing the +// storage account's name and either its primary or secondary key. +func NewSharedKeyCredential(accountName string, accountKey string) (*SharedKeyCredential, error) { + c := SharedKeyCredential{accountName: accountName} + if err := c.SetAccountKey(accountKey); err != nil { + return nil, err + } + return &c, nil +} + +// SharedKeyCredential contains an account's name and its primary or secondary key. +// It is immutable making it shareable and goroutine-safe. +type SharedKeyCredential struct { + // Only the NewSharedKeyCredential method should set these; all other methods should treat them as read-only + accountName string + accountKey atomic.Value // []byte +} + +// AccountName returns the Storage account's name. 
+func (c *SharedKeyCredential) AccountName() string { + return c.accountName +} + +// SetAccountKey replaces the existing account key with the specified account key. +func (c *SharedKeyCredential) SetAccountKey(accountKey string) error { + _bytes, err := base64.StdEncoding.DecodeString(accountKey) + if err != nil { + return fmt.Errorf("decode account key: %w", err) + } + c.accountKey.Store(_bytes) + return nil +} + +// ComputeHMACSHA256 generates a hash signature for an HTTP request or for a SAS. +func (c *SharedKeyCredential) ComputeHMACSHA256(message string) (string, error) { + h := hmac.New(sha256.New, c.accountKey.Load().([]byte)) + _, err := h.Write([]byte(message)) + return base64.StdEncoding.EncodeToString(h.Sum(nil)), err +} + +func (c *SharedKeyCredential) buildStringToSign(req *http.Request) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + headers := req.Header + contentLength := headers.Get(headerContentLength) + if contentLength == "0" { + contentLength = "" + } + + canonicalizedResource, err := c.buildCanonicalizedResource(req.URL) + if err != nil { + return "", err + } + + stringToSign := strings.Join([]string{ + req.Method, + headers.Get(headerContentEncoding), + headers.Get(headerContentLanguage), + contentLength, + headers.Get(headerContentMD5), + headers.Get(headerContentType), + "", // Empty date because x-ms-date is expected (as per web page above) + headers.Get(headerIfModifiedSince), + headers.Get(headerIfMatch), + headers.Get(headerIfNoneMatch), + headers.Get(headerIfUnmodifiedSince), + headers.Get(headerRange), + c.buildCanonicalizedHeader(headers), + canonicalizedResource, + }, "\n") + return stringToSign, nil +} + +func (c *SharedKeyCredential) buildCanonicalizedHeader(headers http.Header) string { + cm := map[string][]string{} + for k, v := range headers { + headerName := strings.TrimSpace(strings.ToLower(k)) + if strings.HasPrefix(headerName, "x-ms-") { + cm[headerName] = v // NOTE: the value must not have any whitespace around it. + } + } + if len(cm) == 0 { + return "" + } + + keys := make([]string, 0, len(cm)) + for key := range cm { + keys = append(keys, key) + } + sort.Strings(keys) + ch := bytes.NewBufferString("") + for i, key := range keys { + if i > 0 { + ch.WriteRune('\n') + } + ch.WriteString(key) + ch.WriteRune(':') + ch.WriteString(strings.Join(cm[key], ",")) + } + return ch.String() +} + +func (c *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, error) { + // https://docs.microsoft.com/en-us/rest/api/storageservices/authentication-for-the-azure-storage-services + cr := bytes.NewBufferString("/") + cr.WriteString(c.accountName) + + if len(u.Path) > 0 { + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. 
+ // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr.WriteString(u.EscapedPath()) + } else { + // a slash is required to indicate the root path + cr.WriteString("/") + } + + // params is a map[string][]string; param name is key; params values is []string + params, err := url.ParseQuery(u.RawQuery) // Returns URL decoded values + if err != nil { + return "", fmt.Errorf("failed to parse query params: %w", err) + } + + if len(params) > 0 { // There is at least 1 query parameter + var paramNames []string // We use this to sort the parameter key names + for paramName := range params { + paramNames = append(paramNames, paramName) // paramNames must be lowercase + } + sort.Strings(paramNames) + + for _, paramName := range paramNames { + paramValues := params[paramName] + sort.Strings(paramValues) + + // Join the sorted key values separated by ',' + // Then prepend "keyName:"; then add this string to the buffer + cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ",")) + } + } + return cr.String(), nil +} + +type sharedKeyCredPolicy struct { + cred *SharedKeyCredential +} + +func newSharedKeyCredPolicy(cred *SharedKeyCredential) *sharedKeyCredPolicy { + return &sharedKeyCredPolicy{cred: cred} +} + +func (s *sharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) { + if d := req.Raw().Header.Get(headerXmsDate); d == "" { + req.Raw().Header.Set(headerXmsDate, time.Now().UTC().Format(http.TimeFormat)) + } + stringToSign, err := s.cred.buildStringToSign(req.Raw()) + if err != nil { + return nil, err + } + signature, err := s.cred.ComputeHMACSHA256(stringToSign) + if err != nil { + return nil, err + } + authHeader := strings.Join([]string{"SharedKey ", s.cred.AccountName(), ":", signature}, "") + req.Raw().Header.Set(headerAuthorization, authHeader) + + response, err := req.Next() + if err != nil && response != nil && response.StatusCode == http.StatusForbidden { + // Service failed to authenticate request, log it + log.Write(azlog.EventResponse, "===== HTTP Forbidden status, String-to-NewSASQueryParameters:\n"+stringToSign+"\n===============================\n") + } + return response, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_storage_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_storage_error.go new file mode 100644 index 000000000..08c9c8730 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_storage_error.go @@ -0,0 +1,236 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "net/http" + "sort" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +// InternalError is an internal error type that all errors get wrapped in. 
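+// The wrapped cause can be inspected with errors.As; when the service returned a structured
+// error body, the cause is typically a *StorageError, for example:
+//
+//	var stgErr *StorageError
+//	if errors.As(err, &stgErr) {
+//		_ = stgErr.ErrorCode
+//	}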
+type InternalError struct {
+ cause error
+}
+
+// Error returns the message of the wrapped error. A StorageError cause is returned verbatim;
+// any other cause is prefixed with an internal-error banner.
+func (e *InternalError) Error() string {
+ if (errors.Is(e.cause, StorageError{})) {
+ return e.cause.Error()
+ }
+
+ return fmt.Sprintf("===== INTERNAL ERROR =====\n%s", e.cause.Error())
+}
+
+// Is reports whether err is an *InternalError
+func (e *InternalError) Is(err error) bool {
+ _, ok := err.(*InternalError)
+
+ return ok
+}
+
+// As sets target to this *InternalError if possible, otherwise delegates to the wrapped cause
+func (e *InternalError) As(target interface{}) bool {
+ nt, ok := target.(**InternalError)
+
+ if ok {
+ *nt = e
+ return ok
+ }
+
+ //goland:noinspection GoErrorsAs
+ return errors.As(e.cause, target)
+}
+
+// StorageError is the internal struct that replaces the generated StorageError.
+// TL;DR: This implements xml.Unmarshaler, and when the original StorageError is substituted, this unmarshaler kicks in.
+// This handles the description and details. responseErrorToStorageError handles the response, cause, and service code.
+type StorageError struct {
+ response *http.Response
+ description string
+
+ ErrorCode StorageErrorCode
+ details map[string]string
+}
+
+func handleError(err error) error {
+ if err == nil {
+ return nil
+ }
+ var respErr *azcore.ResponseError
+ if errors.As(err, &respErr) {
+ return &InternalError{responseErrorToStorageError(respErr)}
+ }
+
+ if err != nil {
+ return &InternalError{err}
+ }
+
+ return nil
+}
+
+// converts an *azcore.ResponseError to a *StorageError, or if that fails, an *InternalError
+func responseErrorToStorageError(responseError *azcore.ResponseError) error {
+ var storageError StorageError
+ body, err := runtime.Payload(responseError.RawResponse)
+ if err != nil {
+ goto Default
+ }
+ if len(body) > 0 {
+ if err := xml.Unmarshal(body, &storageError); err != nil {
+ goto Default
+ }
+ }
+
+ storageError.response = responseError.RawResponse
+
+ storageError.ErrorCode = StorageErrorCode(responseError.RawResponse.Header.Get("x-ms-error-code"))
+
+ if code, ok := storageError.details["Code"]; ok {
+ storageError.ErrorCode = StorageErrorCode(code)
+ delete(storageError.details, "Code")
+ }
+
+ return &storageError
+
+Default:
+ return &InternalError{
+ cause: responseError,
+ }
+}
+
+// StatusCode returns the HTTP status code of the service response. The caller may examine this value but should not modify it.
+func (e *StorageError) StatusCode() int {
+ return e.response.StatusCode
+}
+
+// Error implements the error interface's Error method to return a string representation of the error.
+func (e StorageError) Error() string { + b := &bytes.Buffer{} + + if e.response != nil { + _, _ = fmt.Fprintf(b, "===== RESPONSE ERROR (ErrorCode=%s) =====\n", e.ErrorCode) + _, _ = fmt.Fprintf(b, "Description=%s, Details: ", e.description) + if len(e.details) == 0 { + b.WriteString("(none)\n") + } else { + b.WriteRune('\n') + keys := make([]string, 0, len(e.details)) + // Alphabetize the details + for k := range e.details { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + _, _ = fmt.Fprintf(b, " %s: %+v\n", k, e.details[k]) + } + } + // req := azcore.Request{Request: e.response.Request}.Copy() // Make a copy of the response's request + // TODO: Come Here Mohit Adele + //writeRequestWithResponse(b, &azcore.Request{Request: e.response.Request}, e.response) + } + + return b.String() + ///azcore.writeRequestWithResponse(b, prepareRequestForLogging(req), e.response, nil) + // return e.ErrorNode.Error(b.String()) +} + +// Is checks if err can be cast as StorageError +func (e StorageError) Is(err error) bool { + _, ok := err.(StorageError) + _, ok2 := err.(*StorageError) + + return ok || ok2 +} + +// Response returns StorageError.response +func (e StorageError) Response() *http.Response { + return e.response +} + +//nolint +func writeRequestWithResponse(b *bytes.Buffer, request *policy.Request, response *http.Response) { + // Write the request into the buffer. + _, _ = fmt.Fprint(b, " "+request.Raw().Method+" "+request.Raw().URL.String()+"\n") + writeHeader(b, request.Raw().Header) + if response != nil { + _, _ = fmt.Fprintln(b, " --------------------------------------------------------------------------------") + _, _ = fmt.Fprint(b, " RESPONSE Status: "+response.Status+"\n") + writeHeader(b, response.Header) + } +} + +// formatHeaders appends an HTTP request's or response's header into a Buffer. +//nolint +func writeHeader(b *bytes.Buffer, header map[string][]string) { + if len(header) == 0 { + b.WriteString(" (no headers)\n") + return + } + keys := make([]string, 0, len(header)) + // Alphabetize the headers + for k := range header { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + // Redact the value of any Authorization header to prevent security information from persisting in logs + value := interface{}("REDACTED") + if !strings.EqualFold(k, "Authorization") { + value = header[k] + } + _, _ = fmt.Fprintf(b, " %s: %+v\n", k, value) + } +} + +// Temporary returns true if the error occurred due to a temporary condition (including an HTTP status of 500 or 503). +func (e *StorageError) Temporary() bool { + if e.response != nil { + if (e.response.StatusCode == http.StatusInternalServerError) || (e.response.StatusCode == http.StatusServiceUnavailable) || (e.response.StatusCode == http.StatusBadGateway) { + return true + } + } + + return false +} + +// UnmarshalXML performs custom unmarshalling of XML-formatted Azure storage request errors. 
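+// The "Message" element populates the description returned by Error; the character data of
+// any other element is collected into the details map keyed by the element's local name.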
+//nolint
+func (e *StorageError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
+ tokName := ""
+ var t xml.Token
+ for t, err = d.Token(); err == nil; t, err = d.Token() {
+ switch tt := t.(type) {
+ case xml.StartElement:
+ tokName = tt.Name.Local
+ case xml.EndElement:
+ tokName = ""
+ case xml.CharData:
+ switch tokName {
+ case "":
+ continue
+ case "Message":
+ e.description = string(tt)
+ default:
+ if e.details == nil {
+ e.details = map[string]string{}
+ }
+ e.details[tokName] = string(tt)
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_validators.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_validators.go
new file mode 100644
index 000000000..341858f1a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zc_validators.go
@@ -0,0 +1,107 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azblob
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+)
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+// Raw converts PageRange into primitive start, end integers of type int64
+func (pr *PageRange) Raw() (start, end int64) {
+ if pr.Start != nil {
+ start = *pr.Start
+ }
+ if pr.End != nil {
+ end = *pr.End
+ }
+
+ return
+}
+
+// HttpRange defines a range of bytes within an HTTP resource, starting at offset and
+// ending at offset+count. A zero-value HttpRange indicates the entire resource. An HttpRange
+// which has an offset but a zero value count indicates from the offset to the resource's end.
+type HttpRange struct {
+ Offset int64
+ Count int64
+}
+
+func NewHttpRange(offset, count int64) *HttpRange {
+ return &HttpRange{Offset: offset, Count: count}
+}
+
+func (r *HttpRange) format() *string {
+ if r == nil || (r.Offset == 0 && r.Count == 0) { // Do common case first for performance
+ return nil // No specified range
+ }
+ endOffset := "" // if count == CountToEnd (0)
+ if r.Count > 0 {
+ endOffset = strconv.FormatInt((r.Offset+r.Count)-1, 10)
+ }
+ dataRange := fmt.Sprintf("bytes=%v-%s", r.Offset, endOffset)
+ return &dataRange
+}
+
+func getSourceRange(offset, count *int64) *string {
+ if offset == nil && count == nil {
+ return nil
+ }
+ newOffset := int64(0)
+ newCount := int64(CountToEnd)
+
+ if offset != nil {
+ newOffset = *offset
+ }
+
+ if count != nil {
+ newCount = *count
+ }
+
+ return (&HttpRange{Offset: newOffset, Count: newCount}).format()
+}
+
+func validateSeekableStreamAt0AndGetCount(body io.ReadSeeker) (int64, error) {
+ if body == nil { // nil bodies are "logically" seekable to 0 and are 0 bytes long
+ return 0, nil
+ }
+
+ err := validateSeekableStreamAt0(body)
+ if err != nil {
+ return 0, err
+ }
+
+ count, err := body.Seek(0, io.SeekEnd)
+ if err != nil {
+ return 0, errors.New("body stream must be seekable")
+ }
+
+ _, err = body.Seek(0, io.SeekStart)
+ if err != nil {
+ return 0, err
+ }
+ return count, nil
+}
+
+// return an error if body is not a valid seekable stream at 0
+func validateSeekableStreamAt0(body io.ReadSeeker) error {
+ if body == nil { // nil bodies are "logically" seekable to 0
+ return nil
+ }
+ if pos, err := body.Seek(0, io.SeekCurrent); pos != 0 || err != nil {
+ // Help detect programmer error
+ if err != nil {
+ return errors.New("body stream must be seekable")
+ }
+ return errors.New("body stream must be set to position 0")
+ }
+
return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_access_conditions.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_access_conditions.go new file mode 100644 index 000000000..93a2b1a70 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_access_conditions.go @@ -0,0 +1,43 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +const ( + // ETagNone represents an empty entity tag. + ETagNone = "" + + // ETagAny matches any entity tag. + ETagAny = "*" +) + +// ContainerAccessConditions identifies container-specific access conditions which you optionally set. +type ContainerAccessConditions struct { + ModifiedAccessConditions *ModifiedAccessConditions + LeaseAccessConditions *LeaseAccessConditions +} + +func (ac *ContainerAccessConditions) format() (*ModifiedAccessConditions, *LeaseAccessConditions) { + if ac == nil { + return nil, nil + } + + return ac.ModifiedAccessConditions, ac.LeaseAccessConditions +} + +// BlobAccessConditions identifies blob-specific access conditions which you optionally set. +type BlobAccessConditions struct { + LeaseAccessConditions *LeaseAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (ac *BlobAccessConditions) format() (*LeaseAccessConditions, *ModifiedAccessConditions) { + if ac == nil { + return nil, nil + } + + return ac.LeaseAccessConditions, ac.ModifiedAccessConditions +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_append_blob_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_append_blob_client_util.go new file mode 100644 index 000000000..19c3fef66 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_append_blob_client_util.go @@ -0,0 +1,184 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import "time" + +// --------------------------------------------------------------------------------------------------------------------- + +// AppendBlobCreateOptions provides set of configurations for Create Append Blob operation +type AppendBlobCreateOptions struct { + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + // Specified if a legal hold should be set on the blob. + LegalHold *bool + + BlobAccessConditions *BlobAccessConditions + + HTTPHeaders *BlobHTTPHeaders + + CpkInfo *CpkInfo + + CpkScopeInfo *CpkScopeInfo + // Optional. Used to set blob tags in various blob operations. + TagsMap map[string]string + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs + // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source + // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. + // See Naming and Referencing Containers, Blobs, and Metadata for more information. 
+ Metadata map[string]string +} + +func (o *AppendBlobCreateOptions) format() (*appendBlobClientCreateOptions, *BlobHTTPHeaders, *LeaseAccessConditions, + *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) { + + if o == nil { + return nil, nil, nil, nil, nil, nil + } + + options := appendBlobClientCreateOptions{ + BlobTagsString: serializeBlobTagsToStrPtr(o.TagsMap), + Metadata: o.Metadata, + ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, + ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, + LegalHold: o.LegalHold, + } + + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return &options, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions +} + +// AppendBlobCreateResponse contains the response from method AppendBlobClient.Create. +type AppendBlobCreateResponse struct { + appendBlobClientCreateResponse +} + +func toAppendBlobCreateResponse(resp appendBlobClientCreateResponse) AppendBlobCreateResponse { + return AppendBlobCreateResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// AppendBlobAppendBlockOptions provides set of configurations for AppendBlock operation +type AppendBlobAppendBlockOptions struct { + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + + AppendPositionAccessConditions *AppendPositionAccessConditions + + CpkInfo *CpkInfo + + CpkScopeInfo *CpkScopeInfo + + BlobAccessConditions *BlobAccessConditions +} + +func (o *AppendBlobAppendBlockOptions) format() (*appendBlobClientAppendBlockOptions, *AppendPositionAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions, *LeaseAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil + } + + options := &appendBlobClientAppendBlockOptions{ + TransactionalContentCRC64: o.TransactionalContentCRC64, + TransactionalContentMD5: o.TransactionalContentMD5, + } + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return options, o.AppendPositionAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions, leaseAccessConditions +} + +// AppendBlobAppendBlockResponse contains the response from method AppendBlobClient.AppendBlock. +type AppendBlobAppendBlockResponse struct { + appendBlobClientAppendBlockResponse +} + +func toAppendBlobAppendBlockResponse(resp appendBlobClientAppendBlockResponse) AppendBlobAppendBlockResponse { + return AppendBlobAppendBlockResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// AppendBlobAppendBlockFromURLOptions provides set of configurations for AppendBlockFromURL operation +type AppendBlobAppendBlockFromURLOptions struct { + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. 
+ TransactionalContentMD5 []byte + + AppendPositionAccessConditions *AppendPositionAccessConditions + + CpkInfo *CpkInfo + + CpkScopeInfo *CpkScopeInfo + + SourceModifiedAccessConditions *SourceModifiedAccessConditions + + BlobAccessConditions *BlobAccessConditions + // Optional, you can specify whether a particular range of the blob is read + Offset *int64 + + Count *int64 +} + +func (o *AppendBlobAppendBlockFromURLOptions) format() (*appendBlobClientAppendBlockFromURLOptions, *CpkInfo, *CpkScopeInfo, *LeaseAccessConditions, *AppendPositionAccessConditions, *ModifiedAccessConditions, *SourceModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil, nil + } + + options := &appendBlobClientAppendBlockFromURLOptions{ + SourceRange: getSourceRange(o.Offset, o.Count), + SourceContentMD5: o.SourceContentMD5, + SourceContentcrc64: o.SourceContentCRC64, + TransactionalContentMD5: o.TransactionalContentMD5, + } + + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return options, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, o.AppendPositionAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions +} + +// AppendBlobAppendBlockFromURLResponse contains the response from method AppendBlobClient.AppendBlockFromURL. +type AppendBlobAppendBlockFromURLResponse struct { + appendBlobClientAppendBlockFromURLResponse +} + +func toAppendBlobAppendBlockFromURLResponse(resp appendBlobClientAppendBlockFromURLResponse) AppendBlobAppendBlockFromURLResponse { + return AppendBlobAppendBlockFromURLResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// AppendBlobSealOptions provides set of configurations for SealAppendBlob operation +type AppendBlobSealOptions struct { + BlobAccessConditions *BlobAccessConditions + AppendPositionAccessConditions *AppendPositionAccessConditions +} + +func (o *AppendBlobSealOptions) format() (leaseAccessConditions *LeaseAccessConditions, + modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) { + if o == nil { + return nil, nil, nil + } + + return +} + +// AppendBlobSealResponse contains the response from method AppendBlobClient.Seal. +type AppendBlobSealResponse struct { + appendBlobClientSealResponse +} + +func toAppendBlobSealResponse(resp appendBlobClientSealResponse) AppendBlobSealResponse { + return AppendBlobSealResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_client_util.go new file mode 100644 index 000000000..f4425b18c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_client_util.go @@ -0,0 +1,478 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azblob + +import ( + "context" + "io" + "net/http" + "time" +) + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobDownloadOptions provides set of configurations for Download blob operation +type BlobDownloadOptions struct { + // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentMD5 *bool + + // Optional, you can specify whether a particular range of the blob is read + Offset *int64 + Count *int64 + + BlobAccessConditions *BlobAccessConditions + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo +} + +func (o *BlobDownloadOptions) format() (*blobClientDownloadOptions, *LeaseAccessConditions, *CpkInfo, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + offset := int64(0) + count := int64(CountToEnd) + + if o.Offset != nil { + offset = *o.Offset + } + + if o.Count != nil { + count = *o.Count + } + + basics := blobClientDownloadOptions{ + RangeGetContentMD5: o.RangeGetContentMD5, + Range: (&HttpRange{Offset: offset, Count: count}).format(), + } + + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return &basics, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions +} + +// BlobDownloadResponse wraps AutoRest generated BlobDownloadResponse and helps to provide info for retry. +type BlobDownloadResponse struct { + blobClientDownloadResponse + ctx context.Context + b *BlobClient + getInfo HTTPGetterInfo + ObjectReplicationRules []ObjectReplicationPolicy +} + +// Body constructs new RetryReader stream for reading data. If a connection fails +// while reading, it will make additional requests to reestablish a connection and +// continue reading. Specifying a RetryReaderOption's with MaxRetryRequests set to 0 +// (the default), returns the original response body and no retries will be performed. +// Pass in nil for options to accept the default options. +func (r *BlobDownloadResponse) Body(options *RetryReaderOptions) io.ReadCloser { + if options == nil { + options = &RetryReaderOptions{} + } + + if options.MaxRetryRequests == 0 { // No additional retries + return r.RawResponse.Body + } + return NewRetryReader(r.ctx, r.RawResponse, r.getInfo, *options, + func(ctx context.Context, getInfo HTTPGetterInfo) (*http.Response, error) { + accessConditions := &BlobAccessConditions{ + ModifiedAccessConditions: &ModifiedAccessConditions{IfMatch: &getInfo.ETag}, + } + options := BlobDownloadOptions{ + Offset: &getInfo.Offset, + Count: &getInfo.Count, + BlobAccessConditions: accessConditions, + CpkInfo: options.CpkInfo, + //CpkScopeInfo: o.CpkScopeInfo, + } + resp, err := r.b.Download(ctx, &options) + if err != nil { + return nil, err + } + return resp.RawResponse, err + }, + ) +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobDeleteOptions provides set of configurations for Delete blob operation +type BlobDeleteOptions struct { + // Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob + // and all of its snapshots. 
only: Delete only the blob's snapshots and not the blob itself + DeleteSnapshots *DeleteSnapshotsOptionType + BlobAccessConditions *BlobAccessConditions +} + +func (o *BlobDeleteOptions) format() (*blobClientDeleteOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + basics := blobClientDeleteOptions{ + DeleteSnapshots: o.DeleteSnapshots, + } + + if o.BlobAccessConditions == nil { + return &basics, nil, nil + } + + return &basics, o.BlobAccessConditions.LeaseAccessConditions, o.BlobAccessConditions.ModifiedAccessConditions +} + +// BlobDeleteResponse contains the response from method BlobClient.Delete. +type BlobDeleteResponse struct { + blobClientDeleteResponse +} + +func toBlobDeleteResponse(resp blobClientDeleteResponse) BlobDeleteResponse { + return BlobDeleteResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobUndeleteOptions provides set of configurations for Blob Undelete operation +type BlobUndeleteOptions struct { +} + +func (o *BlobUndeleteOptions) format() *blobClientUndeleteOptions { + return nil +} + +// BlobUndeleteResponse contains the response from method BlobClient.Undelete. +type BlobUndeleteResponse struct { + blobClientUndeleteResponse +} + +func toBlobUndeleteResponse(resp blobClientUndeleteResponse) BlobUndeleteResponse { + return BlobUndeleteResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobSetTierOptions provides set of configurations for SetTier on blob operation +type BlobSetTierOptions struct { + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + + LeaseAccessConditions *LeaseAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *BlobSetTierOptions) format() (*blobClientSetTierOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + basics := blobClientSetTierOptions{RehydratePriority: o.RehydratePriority} + return &basics, o.LeaseAccessConditions, o.ModifiedAccessConditions +} + +// BlobSetTierResponse contains the response from method BlobClient.SetTier. 
+type BlobSetTierResponse struct { + blobClientSetTierResponse +} + +func toBlobSetTierResponse(resp blobClientSetTierResponse) BlobSetTierResponse { + return BlobSetTierResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobGetPropertiesOptions provides set of configurations for GetProperties blob operation +type BlobGetPropertiesOptions struct { + BlobAccessConditions *BlobAccessConditions + CpkInfo *CpkInfo +} + +func (o *BlobGetPropertiesOptions) format() (blobClientGetPropertiesOptions *blobClientGetPropertiesOptions, + leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions = o.BlobAccessConditions.format() + return nil, leaseAccessConditions, o.CpkInfo, modifiedAccessConditions +} + +// ObjectReplicationRules struct +type ObjectReplicationRules struct { + RuleId string + Status string +} + +// ObjectReplicationPolicy are deserialized attributes +type ObjectReplicationPolicy struct { + PolicyId *string + Rules *[]ObjectReplicationRules +} + +// BlobGetPropertiesResponse reformat the GetPropertiesResponse object for easy consumption +type BlobGetPropertiesResponse struct { + blobClientGetPropertiesResponse + + // deserialized attributes + ObjectReplicationRules []ObjectReplicationPolicy +} + +func toGetBlobPropertiesResponse(resp blobClientGetPropertiesResponse) BlobGetPropertiesResponse { + getResp := BlobGetPropertiesResponse{ + blobClientGetPropertiesResponse: resp, + ObjectReplicationRules: deserializeORSPolicies(resp.ObjectReplicationRules), + } + return getResp +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobSetHTTPHeadersOptions provides set of configurations for SetHTTPHeaders on blob operation +type BlobSetHTTPHeadersOptions struct { + LeaseAccessConditions *LeaseAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *BlobSetHTTPHeadersOptions) format() (*blobClientSetHTTPHeadersOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + return nil, o.LeaseAccessConditions, o.ModifiedAccessConditions +} + +// BlobSetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders. +type BlobSetHTTPHeadersResponse struct { + blobClientSetHTTPHeadersResponse +} + +func toBlobSetHTTPHeadersResponse(resp blobClientSetHTTPHeadersResponse) BlobSetHTTPHeadersResponse { + return BlobSetHTTPHeadersResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobSetMetadataOptions provides set of configurations for Set Metadata on blob operation +type BlobSetMetadataOptions struct { + LeaseAccessConditions *LeaseAccessConditions + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *BlobSetMetadataOptions) format() (leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, + cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + return o.LeaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.ModifiedAccessConditions +} + +// BlobSetMetadataResponse contains the response from method BlobClient.SetMetadata. 
+type BlobSetMetadataResponse struct { + blobClientSetMetadataResponse +} + +func toBlobSetMetadataResponse(resp blobClientSetMetadataResponse) BlobSetMetadataResponse { + return BlobSetMetadataResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobCreateSnapshotOptions provides set of configurations for CreateSnapshot of blob operation +type BlobCreateSnapshotOptions struct { + Metadata map[string]string + LeaseAccessConditions *LeaseAccessConditions + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *BlobCreateSnapshotOptions) format() (blobSetMetadataOptions *blobClientCreateSnapshotOptions, cpkInfo *CpkInfo, + cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil + } + + basics := blobClientCreateSnapshotOptions{ + Metadata: o.Metadata, + } + + return &basics, o.CpkInfo, o.CpkScopeInfo, o.ModifiedAccessConditions, o.LeaseAccessConditions +} + +// BlobCreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot +type BlobCreateSnapshotResponse struct { + blobClientCreateSnapshotResponse +} + +func toBlobCreateSnapshotResponse(resp blobClientCreateSnapshotResponse) BlobCreateSnapshotResponse { + return BlobCreateSnapshotResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobStartCopyOptions provides set of configurations for StartCopyFromURL blob operation +type BlobStartCopyOptions struct { + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Used to set blob tags in various blob operations. + TagsMap map[string]string + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs + // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source + // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. + // See Naming and Referencing Containers, Blobs, and Metadata for more information. + Metadata map[string]string + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer. + SealBlob *bool + // Optional. Indicates the tier to be set on the blob. 
+ Tier *AccessTier + + SourceModifiedAccessConditions *SourceModifiedAccessConditions + + ModifiedAccessConditions *ModifiedAccessConditions + + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *BlobStartCopyOptions) format() (blobStartCopyFromUrlOptions *blobClientStartCopyFromURLOptions, + sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + basics := blobClientStartCopyFromURLOptions{ + BlobTagsString: serializeBlobTagsToStrPtr(o.TagsMap), + Metadata: o.Metadata, + RehydratePriority: o.RehydratePriority, + SealBlob: o.SealBlob, + Tier: o.Tier, + ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, + ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, + LegalHold: o.LegalHold, + } + + return &basics, o.SourceModifiedAccessConditions, o.ModifiedAccessConditions, o.LeaseAccessConditions +} + +// BlobStartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL. +type BlobStartCopyFromURLResponse struct { + blobClientStartCopyFromURLResponse +} + +func toBlobStartCopyFromURLResponse(resp blobClientStartCopyFromURLResponse) BlobStartCopyFromURLResponse { + return BlobStartCopyFromURLResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobAbortCopyOptions provides set of configurations for AbortCopyFromURL operation +type BlobAbortCopyOptions struct { + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *BlobAbortCopyOptions) format() (blobAbortCopyFromUrlOptions *blobClientAbortCopyFromURLOptions, + leaseAccessConditions *LeaseAccessConditions) { + if o == nil { + return nil, nil + } + return nil, o.LeaseAccessConditions +} + +// BlobAbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL +type BlobAbortCopyFromURLResponse struct { + blobClientAbortCopyFromURLResponse +} + +func toBlobAbortCopyFromURLResponse(resp blobClientAbortCopyFromURLResponse) BlobAbortCopyFromURLResponse { + return BlobAbortCopyFromURLResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobSetTagsOptions provides set of configurations for SetTags operation +type BlobSetTagsOptions struct { + // The version id parameter is an opaque DateTime value that, when present, + // specifies the version of the blob to operate on. It's for service version 2019-10-10 and newer. + VersionID *string + // Optional header, Specifies the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Optional header, Specifies the transactional md5 for the body, to be validated by the service. 
+ TransactionalContentMD5 []byte + + TagsMap map[string]string + + ModifiedAccessConditions *ModifiedAccessConditions + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *BlobSetTagsOptions) format() (*blobClientSetTagsOptions, *ModifiedAccessConditions, *LeaseAccessConditions) { + if o == nil { + return nil, nil, nil + } + + options := &blobClientSetTagsOptions{ + Tags: serializeBlobTags(o.TagsMap), + TransactionalContentMD5: o.TransactionalContentMD5, + TransactionalContentCRC64: o.TransactionalContentCRC64, + VersionID: o.VersionID, + } + + return options, o.ModifiedAccessConditions, o.LeaseAccessConditions +} + +// BlobSetTagsResponse contains the response from method BlobClient.SetTags +type BlobSetTagsResponse struct { + blobClientSetTagsResponse +} + +func toBlobSetTagsResponse(resp blobClientSetTagsResponse) BlobSetTagsResponse { + return BlobSetTagsResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobGetTagsOptions provides set of configurations for GetTags operation +type BlobGetTagsOptions struct { + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. + Snapshot *string + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string + + BlobAccessConditions *BlobAccessConditions +} + +func (o *BlobGetTagsOptions) format() (*blobClientGetTagsOptions, *ModifiedAccessConditions, *LeaseAccessConditions) { + if o == nil { + return nil, nil, nil + } + + options := &blobClientGetTagsOptions{ + Snapshot: o.Snapshot, + VersionID: o.VersionID, + } + + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + + return options, modifiedAccessConditions, leaseAccessConditions +} + +// BlobGetTagsResponse contains the response from method BlobClient.GetTags +type BlobGetTagsResponse struct { + blobClientGetTagsResponse +} + +func toBlobGetTagsResponse(resp blobClientGetTagsResponse) BlobGetTagsResponse { + return BlobGetTagsResponse{resp} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_lease_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_lease_client_util.go new file mode 100644 index 000000000..4e574622c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_blob_lease_client_util.go @@ -0,0 +1,160 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" +) + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobAcquireLeaseOptions provides set of configurations for AcquireLeaseBlob operation +type BlobAcquireLeaseOptions struct { + // Specifies the Duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease + // can be between 15 and 60 seconds. A lease Duration cannot be changed using renew or change. 
+ Duration *int32 + + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *BlobAcquireLeaseOptions) format() (blobClientAcquireLeaseOptions, *ModifiedAccessConditions) { + if o == nil { + return blobClientAcquireLeaseOptions{}, nil + } + return blobClientAcquireLeaseOptions{ + Duration: o.Duration, + }, o.ModifiedAccessConditions +} + +// BlobAcquireLeaseResponse contains the response from method BlobLeaseClient.AcquireLease. +type BlobAcquireLeaseResponse struct { + blobClientAcquireLeaseResponse +} + +func toBlobAcquireLeaseResponse(resp blobClientAcquireLeaseResponse) BlobAcquireLeaseResponse { + return BlobAcquireLeaseResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobBreakLeaseOptions provides set of configurations for BreakLeaseBlob operation +type BlobBreakLeaseOptions struct { + // For a break operation, proposed Duration the lease should continue before it is broken, in seconds, between 0 and 60. This + // break period is only used if it is shorter than the time remaining on the lease. If longer, the time remaining on the lease + // is used. A new lease will not be available before the break period has expired, but the lease may be held for longer than + // the break period. If this header does not appear with a break operation, a fixed-Duration lease breaks after the remaining + // lease period elapses, and an infinite lease breaks immediately. + BreakPeriod *int32 + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *BlobBreakLeaseOptions) format() (*blobClientBreakLeaseOptions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil + } + + if o.BreakPeriod != nil { + period := leasePeriodPointer(*o.BreakPeriod) + return &blobClientBreakLeaseOptions{ + BreakPeriod: period, + }, o.ModifiedAccessConditions + } + + return nil, o.ModifiedAccessConditions +} + +// BlobBreakLeaseResponse contains the response from method BlobLeaseClient.BreakLease. 
+type BlobBreakLeaseResponse struct { + blobClientBreakLeaseResponse +} + +func toBlobBreakLeaseResponse(resp blobClientBreakLeaseResponse) BlobBreakLeaseResponse { + return BlobBreakLeaseResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobChangeLeaseOptions provides set of configurations for ChangeLeaseBlob operation +type BlobChangeLeaseOptions struct { + ProposedLeaseID *string + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *BlobChangeLeaseOptions) format() (*string, *blobClientChangeLeaseOptions, *ModifiedAccessConditions, error) { + generatedUuid, err := uuid.New() + if err != nil { + return nil, nil, nil, err + } + leaseID := to.Ptr(generatedUuid.String()) + if o == nil { + return leaseID, nil, nil, nil + } + + if o.ProposedLeaseID == nil { + o.ProposedLeaseID = leaseID + } + + return o.ProposedLeaseID, nil, o.ModifiedAccessConditions, nil +} + +// BlobChangeLeaseResponse contains the response from method BlobLeaseClient.ChangeLease +type BlobChangeLeaseResponse struct { + blobClientChangeLeaseResponse +} + +func toBlobChangeLeaseResponse(resp blobClientChangeLeaseResponse) BlobChangeLeaseResponse { + return BlobChangeLeaseResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlobRenewLeaseOptions provides set of configurations for RenewLeaseBlob operation +type BlobRenewLeaseOptions struct { + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *BlobRenewLeaseOptions) format() (*blobClientRenewLeaseOptions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.ModifiedAccessConditions +} + +// BlobRenewLeaseResponse contains the response from method BlobClient.RenewLease. +type BlobRenewLeaseResponse struct { + blobClientRenewLeaseResponse +} + +func toBlobRenewLeaseResponse(resp blobClientRenewLeaseResponse) BlobRenewLeaseResponse { + return BlobRenewLeaseResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ReleaseLeaseBlobOptions provides set of configurations for ReleaseLeaseBlob operation +type ReleaseLeaseBlobOptions struct { + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *ReleaseLeaseBlobOptions) format() (*blobClientReleaseLeaseOptions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.ModifiedAccessConditions +} + +// BlobReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease. +type BlobReleaseLeaseResponse struct { + blobClientReleaseLeaseResponse +} + +func toBlobReleaseLeaseResponse(resp blobClientReleaseLeaseResponse) BlobReleaseLeaseResponse { + return BlobReleaseLeaseResponse{resp} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_block_blob_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_block_blob_client_util.go new file mode 100644 index 000000000..06d436855 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_block_blob_client_util.go @@ -0,0 +1,272 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azblob + +import "time" + +// --------------------------------------------------------------------------------------------------------------------- + +// BlockBlobUploadOptions provides set of configurations for UploadBlockBlob operation +type BlockBlobUploadOptions struct { + // Optional. Used to set blob tags in various blob operations. + TagsMap map[string]string + + // Optional. Specifies a user-defined name-value pair associated with the blob. + Metadata map[string]string + + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + + HTTPHeaders *BlobHTTPHeaders + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo + BlobAccessConditions *BlobAccessConditions +} + +func (o *BlockBlobUploadOptions) format() (*blockBlobClientUploadOptions, *BlobHTTPHeaders, *LeaseAccessConditions, + *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil + } + + basics := blockBlobClientUploadOptions{ + BlobTagsString: serializeBlobTagsToStrPtr(o.TagsMap), + Metadata: o.Metadata, + Tier: o.Tier, + TransactionalContentMD5: o.TransactionalContentMD5, + } + + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return &basics, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions +} + +// BlockBlobUploadResponse contains the response from method BlockBlobClient.Upload. +type BlockBlobUploadResponse struct { + blockBlobClientUploadResponse +} + +func toBlockBlobUploadResponse(resp blockBlobClientUploadResponse) BlockBlobUploadResponse { + return BlockBlobUploadResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlockBlobStageBlockOptions provides set of configurations for StageBlock operation +type BlockBlobStageBlockOptions struct { + CpkInfo *CpkInfo + + CpkScopeInfo *CpkScopeInfo + + LeaseAccessConditions *LeaseAccessConditions + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +func (o *BlockBlobStageBlockOptions) format() (*blockBlobClientStageBlockOptions, *LeaseAccessConditions, *CpkInfo, *CpkScopeInfo) { + if o == nil { + return nil, nil, nil, nil + } + + return &blockBlobClientStageBlockOptions{ + TransactionalContentCRC64: o.TransactionalContentCRC64, + TransactionalContentMD5: o.TransactionalContentMD5, + }, o.LeaseAccessConditions, o.CpkInfo, o.CpkScopeInfo +} + +// BlockBlobStageBlockResponse contains the response from method BlockBlobClient.StageBlock. +type BlockBlobStageBlockResponse struct { + blockBlobClientStageBlockResponse +} + +func toBlockBlobStageBlockResponse(resp blockBlobClientStageBlockResponse) BlockBlobStageBlockResponse { + return BlockBlobStageBlockResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlockBlobStageBlockFromURLOptions provides set of configurations for StageBlockFromURL operation +type BlockBlobStageBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. 
+ CopySourceAuthorization *string + + LeaseAccessConditions *LeaseAccessConditions + + SourceModifiedAccessConditions *SourceModifiedAccessConditions + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentCRC64 []byte + + Offset *int64 + + Count *int64 + + CpkInfo *CpkInfo + + CpkScopeInfo *CpkScopeInfo +} + +func (o *BlockBlobStageBlockFromURLOptions) format() (*blockBlobClientStageBlockFromURLOptions, *CpkInfo, *CpkScopeInfo, *LeaseAccessConditions, *SourceModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil + } + + options := &blockBlobClientStageBlockFromURLOptions{ + CopySourceAuthorization: o.CopySourceAuthorization, + SourceContentMD5: o.SourceContentMD5, + SourceContentcrc64: o.SourceContentCRC64, + SourceRange: getSourceRange(o.Offset, o.Count), + } + + return options, o.CpkInfo, o.CpkScopeInfo, o.LeaseAccessConditions, o.SourceModifiedAccessConditions +} + +// BlockBlobStageBlockFromURLResponse contains the response from method BlockBlobClient.StageBlockFromURL. +type BlockBlobStageBlockFromURLResponse struct { + blockBlobClientStageBlockFromURLResponse +} + +func toBlockBlobStageBlockFromURLResponse(resp blockBlobClientStageBlockFromURLResponse) BlockBlobStageBlockFromURLResponse { + return BlockBlobStageBlockFromURLResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlockBlobCommitBlockListOptions provides set of configurations for CommitBlockList operation +type BlockBlobCommitBlockListOptions struct { + BlobTagsMap map[string]string + Metadata map[string]string + RequestID *string + Tier *AccessTier + Timeout *int32 + TransactionalContentCRC64 []byte + TransactionalContentMD5 []byte + BlobHTTPHeaders *BlobHTTPHeaders + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo + BlobAccessConditions *BlobAccessConditions +} + +func (o *BlockBlobCommitBlockListOptions) format() (*blockBlobClientCommitBlockListOptions, *BlobHTTPHeaders, *LeaseAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil + } + + options := &blockBlobClientCommitBlockListOptions{ + BlobTagsString: serializeBlobTagsToStrPtr(o.BlobTagsMap), + Metadata: o.Metadata, + RequestID: o.RequestID, + Tier: o.Tier, + Timeout: o.Timeout, + TransactionalContentCRC64: o.TransactionalContentCRC64, + TransactionalContentMD5: o.TransactionalContentMD5, + } + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return options, o.BlobHTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions +} + +// BlockBlobCommitBlockListResponse contains the response from method BlockBlobClient.CommitBlockList. 
+type BlockBlobCommitBlockListResponse struct { + blockBlobClientCommitBlockListResponse +} + +func toBlockBlobCommitBlockListResponse(resp blockBlobClientCommitBlockListResponse) BlockBlobCommitBlockListResponse { + return BlockBlobCommitBlockListResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlockBlobGetBlockListOptions provides set of configurations for GetBlockList operation +type BlockBlobGetBlockListOptions struct { + Snapshot *string + BlobAccessConditions *BlobAccessConditions +} + +func (o *BlockBlobGetBlockListOptions) format() (*blockBlobClientGetBlockListOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return &blockBlobClientGetBlockListOptions{Snapshot: o.Snapshot}, leaseAccessConditions, modifiedAccessConditions +} + +// BlockBlobGetBlockListResponse contains the response from method BlockBlobClient.GetBlockList. +type BlockBlobGetBlockListResponse struct { + blockBlobClientGetBlockListResponse +} + +func toBlockBlobGetBlockListResponse(resp blockBlobClientGetBlockListResponse) BlockBlobGetBlockListResponse { + return BlockBlobGetBlockListResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BlockBlobCopyFromURLOptions provides set of configurations for CopyBlockBlobFromURL operation +type BlockBlobCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsMap map[string]string + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Optional. Indicates the tier to be set on the blob. 
+ Tier *AccessTier + + SourceModifiedAccessConditions *SourceModifiedAccessConditions + + BlobAccessConditions *BlobAccessConditions +} + +func (o *BlockBlobCopyFromURLOptions) format() (*blobClientCopyFromURLOptions, *SourceModifiedAccessConditions, *ModifiedAccessConditions, *LeaseAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + options := &blobClientCopyFromURLOptions{ + BlobTagsString: serializeBlobTagsToStrPtr(o.BlobTagsMap), + CopySourceAuthorization: o.CopySourceAuthorization, + ImmutabilityPolicyExpiry: o.ImmutabilityPolicyExpiry, + ImmutabilityPolicyMode: o.ImmutabilityPolicyMode, + LegalHold: o.LegalHold, + Metadata: o.Metadata, + SourceContentMD5: o.SourceContentMD5, + Tier: o.Tier, + } + + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions +} + +// BlockBlobCopyFromURLResponse contains the response from method BlockBlobClient.CopyFromURL. +type BlockBlobCopyFromURLResponse struct { + blobClientCopyFromURLResponse +} + +func toBlockBlobCopyFromURLResponse(resp blobClientCopyFromURLResponse) BlockBlobCopyFromURLResponse { + return BlockBlobCopyFromURLResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_client_util.go new file mode 100644 index 000000000..657a767dd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_client_util.go @@ -0,0 +1,55 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// ClientOptions adds additional client options while constructing connection +type ClientOptions struct { + // Logging configures the built-in logging policy. + Logging policy.LogOptions + + // Retry configures the built-in retry policy. + Retry policy.RetryOptions + + // Telemetry configures the built-in telemetry policy. + Telemetry policy.TelemetryOptions + + // Transport sets the transport for HTTP requests. + Transport policy.Transporter + + // PerCallPolicies contains custom policies to inject into the pipeline. + // Each policy is executed once per request. + PerCallPolicies []policy.Policy + + // PerRetryPolicies contains custom policies to inject into the pipeline. + // Each policy is executed once per request, and for each retry of that request. 
+ PerRetryPolicies []policy.Policy +} + +func (c *ClientOptions) toPolicyOptions() *azcore.ClientOptions { + return &azcore.ClientOptions{ + Logging: c.Logging, + Retry: c.Retry, + Telemetry: c.Telemetry, + Transport: c.Transport, + PerCallPolicies: c.PerCallPolicies, + PerRetryPolicies: c.PerRetryPolicies, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +func getConnectionOptions(options *ClientOptions) *policy.ClientOptions { + if options == nil { + options = &ClientOptions{} + } + return options.toPolicyOptions() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_client_util.go new file mode 100644 index 000000000..a33103e4b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_client_util.go @@ -0,0 +1,271 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +// --------------------------------------------------------------------------------------------------------------------- + +// ContainerCreateOptions provides set of configurations for CreateContainer operation +type ContainerCreateOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + + // Optional. Specifies a user-defined name-value pair associated with the blob. + Metadata map[string]string + + // Optional. Specifies the encryption scope settings to set on the container. + CpkScope *ContainerCpkScopeInfo +} + +func (o *ContainerCreateOptions) format() (*containerClientCreateOptions, *ContainerCpkScopeInfo) { + if o == nil { + return nil, nil + } + + basicOptions := containerClientCreateOptions{ + Access: o.Access, + Metadata: o.Metadata, + } + + return &basicOptions, o.CpkScope +} + +// ContainerCreateResponse is wrapper around containerClientCreateResponse +type ContainerCreateResponse struct { + containerClientCreateResponse +} + +func toContainerCreateResponse(resp containerClientCreateResponse) ContainerCreateResponse { + return ContainerCreateResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ContainerDeleteOptions provides set of configurations for DeleteContainer operation +type ContainerDeleteOptions struct { + LeaseAccessConditions *LeaseAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *ContainerDeleteOptions) format() (*containerClientDeleteOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + return nil, o.LeaseAccessConditions, o.ModifiedAccessConditions +} + +// ContainerDeleteResponse contains the response from method ContainerClient.Delete. 
+type ContainerDeleteResponse struct { + containerClientDeleteResponse +} + +func toContainerDeleteResponse(resp containerClientDeleteResponse) ContainerDeleteResponse { + return ContainerDeleteResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ContainerGetPropertiesOptions provides set of configurations for GetPropertiesContainer operation +type ContainerGetPropertiesOptions struct { + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *ContainerGetPropertiesOptions) format() (*containerClientGetPropertiesOptions, *LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.LeaseAccessConditions +} + +// ContainerGetPropertiesResponse contains the response from method ContainerClient.GetProperties +type ContainerGetPropertiesResponse struct { + containerClientGetPropertiesResponse +} + +func toContainerGetPropertiesResponse(resp containerClientGetPropertiesResponse) ContainerGetPropertiesResponse { + return ContainerGetPropertiesResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ContainerSetMetadataOptions provides set of configurations for SetMetadataContainer operation +type ContainerSetMetadataOptions struct { + Metadata map[string]string + LeaseAccessConditions *LeaseAccessConditions + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *ContainerSetMetadataOptions) format() (*containerClientSetMetadataOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + return &containerClientSetMetadataOptions{Metadata: o.Metadata}, o.LeaseAccessConditions, o.ModifiedAccessConditions +} + +// ContainerSetMetadataResponse contains the response from method containerClient.SetMetadata +type ContainerSetMetadataResponse struct { + containerClientSetMetadataResponse +} + +func toContainerSetMetadataResponse(resp containerClientSetMetadataResponse) ContainerSetMetadataResponse { + return ContainerSetMetadataResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ContainerGetAccessPolicyOptions provides set of configurations for GetAccessPolicy operation +type ContainerGetAccessPolicyOptions struct { + LeaseAccessConditions *LeaseAccessConditions +} + +func (o *ContainerGetAccessPolicyOptions) format() (*containerClientGetAccessPolicyOptions, *LeaseAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.LeaseAccessConditions +} + +// ContainerGetAccessPolicyResponse contains the response from method ContainerClient.GetAccessPolicy. 
+type ContainerGetAccessPolicyResponse struct {
+    containerClientGetAccessPolicyResponse
+}
+
+func toContainerGetAccessPolicyResponse(resp containerClientGetAccessPolicyResponse) ContainerGetAccessPolicyResponse {
+    return ContainerGetAccessPolicyResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ContainerSetAccessPolicyOptions provides set of configurations for ContainerClient.SetAccessPolicy operation
+type ContainerSetAccessPolicyOptions struct {
+    AccessConditions *ContainerAccessConditions
+    // Specifies whether data in the container may be accessed publicly and the level of access
+    Access *PublicAccessType
+    // the acls for the container
+    ContainerACL []*SignedIdentifier
+    // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+    // analytics logging is enabled.
+    RequestID *string
+    // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+    // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+    Timeout *int32
+}
+
+func (o *ContainerSetAccessPolicyOptions) format() (*containerClientSetAccessPolicyOptions, *LeaseAccessConditions, *ModifiedAccessConditions) {
+    if o == nil {
+        return nil, nil, nil
+    }
+    mac, lac := o.AccessConditions.format()
+    return &containerClientSetAccessPolicyOptions{
+        Access: o.Access,
+        ContainerACL: o.ContainerACL,
+        RequestID: o.RequestID,
+    }, lac, mac
+}
+
+// ContainerSetAccessPolicyResponse contains the response from method ContainerClient.SetAccessPolicy
+type ContainerSetAccessPolicyResponse struct {
+    containerClientSetAccessPolicyResponse
+}
+
+func toContainerSetAccessPolicyResponse(resp containerClientSetAccessPolicyResponse) ContainerSetAccessPolicyResponse {
+    return ContainerSetAccessPolicyResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// ContainerListBlobsFlatOptions provides set of configurations for the ContainerClient.ListBlobsFlat operation
+type ContainerListBlobsFlatOptions struct {
+    // Include this parameter to specify one or more datasets to include in the response.
+    Include []ListBlobsIncludeItem
+    // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
+    // operation returns the NextMarker value within the response body if the listing
+    // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
+    // as the value for the marker parameter in a subsequent call to request the next
+    // page of list items. The marker value is opaque to the client.
+    Marker *string
+    // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
+    // greater than 5000, the server will return up to 5000 items. Note that if the
+    // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+    // of the results. For this reason, it is possible that the service will
+    // return fewer results than specified by maxresults, or than the default of 5000.
+    MaxResults *int32
+    // Filters the results to return only containers whose name begins with the specified prefix.
+ Prefix *string +} + +func (o *ContainerListBlobsFlatOptions) format() *containerClientListBlobFlatSegmentOptions { + if o == nil { + return nil + } + + return &containerClientListBlobFlatSegmentOptions{ + Include: o.Include, + Marker: o.Marker, + Maxresults: o.MaxResults, + Prefix: o.Prefix, + } +} + +// ContainerListBlobFlatPager provides operations for iterating over paged responses +type ContainerListBlobFlatPager struct { + *containerClientListBlobFlatSegmentPager +} + +func toContainerListBlobFlatSegmentPager(resp *containerClientListBlobFlatSegmentPager) *ContainerListBlobFlatPager { + return &ContainerListBlobFlatPager{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +//ContainerListBlobsHierarchyOptions provides set of configurations for ContainerClient.ListBlobsHierarchy +type ContainerListBlobsHierarchyOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListBlobsIncludeItem + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + MaxResults *int32 + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string +} + +func (o *ContainerListBlobsHierarchyOptions) format() *containerClientListBlobHierarchySegmentOptions { + if o == nil { + return nil + } + + return &containerClientListBlobHierarchySegmentOptions{ + Include: o.Include, + Marker: o.Marker, + Maxresults: o.MaxResults, + Prefix: o.Prefix, + } +} + +// ContainerListBlobHierarchyPager provides operations for iterating over paged responses. +type ContainerListBlobHierarchyPager struct { + containerClientListBlobHierarchySegmentPager +} + +func toContainerListBlobHierarchySegmentPager(resp *containerClientListBlobHierarchySegmentPager) *ContainerListBlobHierarchyPager { + if resp == nil { + return nil + } + return &ContainerListBlobHierarchyPager{*resp} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_lease_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_lease_client_util.go new file mode 100644 index 000000000..87572e917 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_container_lease_client_util.go @@ -0,0 +1,166 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package azblob + +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" +) + +// --------------------------------------------------------------------------------------------------------------------- + +// LeaseBreakNaturally tells ContainerClient's or BlobClient's BreakLease method to break the lease using service semantics. +const LeaseBreakNaturally = -1 + +func leasePeriodPointer(period int32) *int32 { + if period != LeaseBreakNaturally { + return &period + } else { + return nil + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ContainerAcquireLeaseOptions provides set of configurations for AcquireLeaseContainer operation +type ContainerAcquireLeaseOptions struct { + Duration *int32 + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *ContainerAcquireLeaseOptions) format() (containerClientAcquireLeaseOptions, *ModifiedAccessConditions) { + if o == nil { + return containerClientAcquireLeaseOptions{}, nil + } + containerAcquireLeaseOptions := containerClientAcquireLeaseOptions{ + Duration: o.Duration, + } + + return containerAcquireLeaseOptions, o.ModifiedAccessConditions +} + +// ContainerAcquireLeaseResponse contains the response from method ContainerLeaseClient.AcquireLease. +type ContainerAcquireLeaseResponse struct { + containerClientAcquireLeaseResponse +} + +func toContainerAcquireLeaseResponse(resp containerClientAcquireLeaseResponse) ContainerAcquireLeaseResponse { + return ContainerAcquireLeaseResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ContainerBreakLeaseOptions provides set of configurations for BreakLeaseContainer operation +type ContainerBreakLeaseOptions struct { + BreakPeriod *int32 + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *ContainerBreakLeaseOptions) format() (*containerClientBreakLeaseOptions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil + } + + containerBreakLeaseOptions := &containerClientBreakLeaseOptions{ + BreakPeriod: o.BreakPeriod, + } + + return containerBreakLeaseOptions, o.ModifiedAccessConditions +} + +// ContainerBreakLeaseResponse contains the response from method ContainerLeaseClient.BreakLease. +type ContainerBreakLeaseResponse struct { + containerClientBreakLeaseResponse +} + +func toContainerBreakLeaseResponse(resp containerClientBreakLeaseResponse) ContainerBreakLeaseResponse { + return ContainerBreakLeaseResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ContainerChangeLeaseOptions provides set of configurations for ChangeLeaseContainer operation +type ContainerChangeLeaseOptions struct { + ProposedLeaseID *string + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *ContainerChangeLeaseOptions) format() (*string, *containerClientChangeLeaseOptions, *ModifiedAccessConditions, error) { + generatedUuid, err := uuid.New() + if err != nil { + return nil, nil, nil, err + } + leaseID := to.Ptr(generatedUuid.String()) + if o == nil { + return leaseID, nil, nil, err + } + + if o.ProposedLeaseID == nil { + o.ProposedLeaseID = leaseID + } + + return o.ProposedLeaseID, nil, o.ModifiedAccessConditions, err +} + +// ContainerChangeLeaseResponse contains the response from method ContainerLeaseClient.ChangeLease. 
+type ContainerChangeLeaseResponse struct { + containerClientChangeLeaseResponse +} + +func toContainerChangeLeaseResponse(resp containerClientChangeLeaseResponse) ContainerChangeLeaseResponse { + return ContainerChangeLeaseResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ContainerReleaseLeaseOptions provides set of configurations for ReleaseLeaseContainer operation +type ContainerReleaseLeaseOptions struct { + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *ContainerReleaseLeaseOptions) format() (*containerClientReleaseLeaseOptions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.ModifiedAccessConditions +} + +// ContainerReleaseLeaseResponse contains the response from method ContainerLeaseClient.ReleaseLease. +type ContainerReleaseLeaseResponse struct { + containerClientReleaseLeaseResponse +} + +func toContainerReleaseLeaseResponse(resp containerClientReleaseLeaseResponse) ContainerReleaseLeaseResponse { + return ContainerReleaseLeaseResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ContainerRenewLeaseOptions provides set of configurations for RenewLeaseContainer operation +type ContainerRenewLeaseOptions struct { + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *ContainerRenewLeaseOptions) format() (*containerClientRenewLeaseOptions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.ModifiedAccessConditions +} + +// ContainerRenewLeaseResponse contains the response from method ContainerLeaseClient.RenewLease. +type ContainerRenewLeaseResponse struct { + containerClientRenewLeaseResponse +} + +func toContainerRenewLeaseResponse(resp containerClientRenewLeaseResponse) ContainerRenewLeaseResponse { + return ContainerRenewLeaseResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_highlevel_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_highlevel_util.go new file mode 100644 index 000000000..c7a67abe7 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_highlevel_util.go @@ -0,0 +1,201 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "context" + "fmt" +) + +const _1MiB = 1024 * 1024 + +// UploadOption identifies options used by the UploadBuffer and UploadFile functions. +type UploadOption struct { + // BlockSize specifies the block size to use; the default (and maximum size) is BlockBlobMaxStageBlockBytes. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are sent to the BlockBlobClient. + // Note that the progress reporting is not always increasing; it can go down when retrying a request. + Progress func(bytesTransferred int64) + + // HTTPHeaders indicates the HTTP headers to be associated with the blob. + HTTPHeaders *BlobHTTPHeaders + + // Metadata indicates the metadata to be associated with the blob when PutBlockList is called. + Metadata map[string]string + + // BlobAccessConditions indicates the access conditions for the block blob. 
+ BlobAccessConditions *BlobAccessConditions + + // AccessTier indicates the tier of blob + AccessTier *AccessTier + + // TagsMap + TagsMap map[string]string + + // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo + + // Parallelism indicates the maximum number of blocks to upload in parallel (0=default) + Parallelism uint16 + // Optional header, Specifies the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 *[]byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 *[]byte +} + +func (o *UploadOption) getStageBlockOptions() *BlockBlobStageBlockOptions { + leaseAccessConditions, _ := o.BlobAccessConditions.format() + return &BlockBlobStageBlockOptions{ + CpkInfo: o.CpkInfo, + CpkScopeInfo: o.CpkScopeInfo, + LeaseAccessConditions: leaseAccessConditions, + } +} + +func (o *UploadOption) getUploadBlockBlobOptions() *BlockBlobUploadOptions { + return &BlockBlobUploadOptions{ + TagsMap: o.TagsMap, + Metadata: o.Metadata, + Tier: o.AccessTier, + HTTPHeaders: o.HTTPHeaders, + BlobAccessConditions: o.BlobAccessConditions, + CpkInfo: o.CpkInfo, + CpkScopeInfo: o.CpkScopeInfo, + } +} + +func (o *UploadOption) getCommitBlockListOptions() *BlockBlobCommitBlockListOptions { + return &BlockBlobCommitBlockListOptions{ + BlobTagsMap: o.TagsMap, + Metadata: o.Metadata, + Tier: o.AccessTier, + BlobHTTPHeaders: o.HTTPHeaders, + CpkInfo: o.CpkInfo, + CpkScopeInfo: o.CpkScopeInfo, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// UploadStreamOptions provides set of configurations for UploadStream operation +type UploadStreamOptions struct { + // TransferManager provides a TransferManager that controls buffer allocation/reuse and + // concurrency. This overrides BufferSize and MaxBuffers if set. + TransferManager TransferManager + transferMangerNotSet bool + // BufferSize sizes the buffer used to read data from source. If < 1 MiB, defaults to 1 MiB. + BufferSize int + // MaxBuffers defines the number of simultaneous uploads will be performed to upload the file. 
+ MaxBuffers int + HTTPHeaders *BlobHTTPHeaders + Metadata map[string]string + BlobAccessConditions *BlobAccessConditions + AccessTier *AccessTier + BlobTagsMap map[string]string + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo +} + +func (u *UploadStreamOptions) defaults() error { + if u.TransferManager != nil { + return nil + } + + if u.MaxBuffers == 0 { + u.MaxBuffers = 1 + } + + if u.BufferSize < _1MiB { + u.BufferSize = _1MiB + } + + var err error + u.TransferManager, err = NewStaticBuffer(u.BufferSize, u.MaxBuffers) + if err != nil { + return fmt.Errorf("bug: default transfer manager could not be created: %s", err) + } + u.transferMangerNotSet = true + return nil +} + +func (u *UploadStreamOptions) getStageBlockOptions() *BlockBlobStageBlockOptions { + leaseAccessConditions, _ := u.BlobAccessConditions.format() + return &BlockBlobStageBlockOptions{ + CpkInfo: u.CpkInfo, + CpkScopeInfo: u.CpkScopeInfo, + LeaseAccessConditions: leaseAccessConditions, + } +} + +func (u *UploadStreamOptions) getCommitBlockListOptions() *BlockBlobCommitBlockListOptions { + options := &BlockBlobCommitBlockListOptions{ + BlobTagsMap: u.BlobTagsMap, + Metadata: u.Metadata, + Tier: u.AccessTier, + BlobHTTPHeaders: u.HTTPHeaders, + CpkInfo: u.CpkInfo, + CpkScopeInfo: u.CpkScopeInfo, + BlobAccessConditions: u.BlobAccessConditions, + } + + return options +} + +// --------------------------------------------------------------------------------------------------------------------- + +// DownloadOptions identifies options used by the DownloadToBuffer and DownloadToFile functions. +type DownloadOptions struct { + // BlockSize specifies the block size to use for each parallel download; the default size is BlobDefaultDownloadBlockSize. + BlockSize int64 + + // Progress is a function that is invoked periodically as bytes are received. + Progress func(bytesTransferred int64) + + // BlobAccessConditions indicates the access conditions used when making HTTP GET requests against the blob. + BlobAccessConditions *BlobAccessConditions + + // ClientProvidedKeyOptions indicates the client provided key by name and/or by value to encrypt/decrypt data. + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo + + // Parallelism indicates the maximum number of blocks to download in parallel (0=default) + Parallelism uint16 + + // RetryReaderOptionsPerBlock is used when downloading each block. + RetryReaderOptionsPerBlock RetryReaderOptions +} + +func (o *DownloadOptions) getBlobPropertiesOptions() *BlobGetPropertiesOptions { + return &BlobGetPropertiesOptions{ + BlobAccessConditions: o.BlobAccessConditions, + CpkInfo: o.CpkInfo, + } +} + +func (o *DownloadOptions) getDownloadBlobOptions(offSet, count int64, rangeGetContentMD5 *bool) *BlobDownloadOptions { + return &BlobDownloadOptions{ + BlobAccessConditions: o.BlobAccessConditions, + CpkInfo: o.CpkInfo, + CpkScopeInfo: o.CpkScopeInfo, + Offset: &offSet, + Count: &count, + RangeGetContentMD5: rangeGetContentMD5, + } +} + +// --------------------------------------------------------------------------------------------------------------------- + +// BatchTransferOptions identifies options used by DoBatchTransfer. 
+type BatchTransferOptions struct { + TransferSize int64 + ChunkSize int64 + Parallelism uint16 + Operation func(offset int64, chunkSize int64, ctx context.Context) error + OperationName string +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_page_blob_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_page_blob_client_util.go new file mode 100644 index 000000000..2be275873 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_page_blob_client_util.go @@ -0,0 +1,402 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "strconv" + "time" +) + +// --------------------------------------------------------------------------------------------------------------------- + +func rangeToString(offset, count int64) string { + return "bytes=" + strconv.FormatInt(offset, 10) + "-" + strconv.FormatInt(offset+count-1, 10) +} + +// --------------------------------------------------------------------------------------------------------------------- + +// PageBlobCreateOptions provides set of configurations for CreatePageBlob operation +type PageBlobCreateOptions struct { + // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of + // the sequence number must be between 0 and 2^63 - 1. + BlobSequenceNumber *int64 + // Optional. Used to set blob tags in various blob operations. + BlobTagsMap map[string]string + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination blob. If one or more name-value pairs + // are specified, the destination blob is created with the specified metadata, and metadata is not copied from the source + // blob or file. Note that beginning with version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. + // See Naming and Referencing Containers, Blobs, and Metadata for more information. + Metadata map[string]string + // Optional. Indicates the tier to be set on the page blob. + Tier *PremiumPageBlobAccessTier + + HTTPHeaders *BlobHTTPHeaders + + CpkInfo *CpkInfo + + CpkScopeInfo *CpkScopeInfo + + BlobAccessConditions *BlobAccessConditions + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + // Specified if a legal hold should be set on the blob. + LegalHold *bool +} + +func (o *PageBlobCreateOptions) format() (*pageBlobClientCreateOptions, *BlobHTTPHeaders, *LeaseAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil + } + + options := &pageBlobClientCreateOptions{ + BlobSequenceNumber: o.BlobSequenceNumber, + BlobTagsString: serializeBlobTagsToStrPtr(o.BlobTagsMap), + Metadata: o.Metadata, + Tier: o.Tier, + } + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return options, o.HTTPHeaders, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions +} + +// PageBlobCreateResponse contains the response from method PageBlobClient.Create. 
+type PageBlobCreateResponse struct {
+    pageBlobClientCreateResponse
+}
+
+func toPageBlobCreateResponse(resp pageBlobClientCreateResponse) PageBlobCreateResponse {
+    return PageBlobCreateResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// PageBlobUploadPagesOptions provides set of configurations for UploadPages operation
+type PageBlobUploadPagesOptions struct {
+    PageRange *HttpRange
+    // Specify the transactional crc64 for the body, to be validated by the service.
+    TransactionalContentCRC64 []byte
+    // Specify the transactional md5 for the body, to be validated by the service.
+    TransactionalContentMD5 []byte
+
+    CpkInfo *CpkInfo
+    CpkScopeInfo *CpkScopeInfo
+    SequenceNumberAccessConditions *SequenceNumberAccessConditions
+    BlobAccessConditions *BlobAccessConditions
+}
+
+func (o *PageBlobUploadPagesOptions) format() (*pageBlobClientUploadPagesOptions, *LeaseAccessConditions,
+    *CpkInfo, *CpkScopeInfo, *SequenceNumberAccessConditions, *ModifiedAccessConditions) {
+    if o == nil {
+        return nil, nil, nil, nil, nil, nil
+    }
+
+    options := &pageBlobClientUploadPagesOptions{
+        TransactionalContentCRC64: o.TransactionalContentCRC64,
+        TransactionalContentMD5: o.TransactionalContentMD5,
+    }
+
+    if o.PageRange != nil {
+        options.Range = o.PageRange.format()
+    }
+
+    leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format()
+    return options, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions
+}
+
+// PageBlobUploadPagesResponse contains the response from method PageBlobClient.UploadPages.
+type PageBlobUploadPagesResponse struct {
+    pageBlobClientUploadPagesResponse
+}
+
+func toPageBlobUploadPagesResponse(resp pageBlobClientUploadPagesResponse) PageBlobUploadPagesResponse {
+    return PageBlobUploadPagesResponse{resp}
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+
+// PageBlobUploadPagesFromURLOptions provides set of configurations for UploadPagesFromURL operation
+type PageBlobUploadPagesFromURLOptions struct {
+    // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+    CopySourceAuthorization *string
+    // Specify the md5 calculated for the range of bytes that must be read from the copy source.
+    SourceContentMD5 []byte
+    // Specify the crc64 calculated for the range of bytes that must be read from the copy source.
+ SourceContentCRC64 []byte + + CpkInfo *CpkInfo + + CpkScopeInfo *CpkScopeInfo + + SequenceNumberAccessConditions *SequenceNumberAccessConditions + + SourceModifiedAccessConditions *SourceModifiedAccessConditions + + BlobAccessConditions *BlobAccessConditions +} + +func (o *PageBlobUploadPagesFromURLOptions) format() (*pageBlobClientUploadPagesFromURLOptions, *CpkInfo, *CpkScopeInfo, + *LeaseAccessConditions, *SequenceNumberAccessConditions, *ModifiedAccessConditions, *SourceModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil, nil, nil + } + + options := &pageBlobClientUploadPagesFromURLOptions{ + SourceContentMD5: o.SourceContentMD5, + SourceContentcrc64: o.SourceContentCRC64, + CopySourceAuthorization: o.CopySourceAuthorization, + } + + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return options, o.CpkInfo, o.CpkScopeInfo, leaseAccessConditions, o.SequenceNumberAccessConditions, modifiedAccessConditions, o.SourceModifiedAccessConditions +} + +// PageBlobUploadPagesFromURLResponse contains the response from method PageBlobClient.UploadPagesFromURL +type PageBlobUploadPagesFromURLResponse struct { + pageBlobClientUploadPagesFromURLResponse +} + +func toPageBlobUploadPagesFromURLResponse(resp pageBlobClientUploadPagesFromURLResponse) PageBlobUploadPagesFromURLResponse { + return PageBlobUploadPagesFromURLResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// PageBlobClearPagesOptions provides set of configurations for PageBlobClient.ClearPages operation +type PageBlobClearPagesOptions struct { + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo + SequenceNumberAccessConditions *SequenceNumberAccessConditions + BlobAccessConditions *BlobAccessConditions +} + +func (o *PageBlobClearPagesOptions) format() (*LeaseAccessConditions, *CpkInfo, + *CpkScopeInfo, *SequenceNumberAccessConditions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, o.SequenceNumberAccessConditions, modifiedAccessConditions +} + +// PageBlobClearPagesResponse contains the response from method PageBlobClient.ClearPages +type PageBlobClearPagesResponse struct { + pageBlobClientClearPagesResponse +} + +func toPageBlobClearPagesResponse(resp pageBlobClientClearPagesResponse) PageBlobClearPagesResponse { + return PageBlobClearPagesResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// PageBlobGetPageRangesOptions provides set of configurations for GetPageRanges operation +type PageBlobGetPageRangesOptions struct { + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + MaxResults *int32 + // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot + // of the target blob. 
The response will only contain pages that were changed + // between the target blob and its previous snapshot. + PrevSnapshotURL *string + // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response + // will contain only pages that were changed between target blob and previous + // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot + // specified by prevsnapshot is the older of the two. Note that incremental + // snapshots are currently supported only for blobs created on or after January 1, 2016. + PrevSnapshot *string + // Optional, you can specify whether a particular range of the blob is read + PageRange *HttpRange + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + BlobAccessConditions *BlobAccessConditions +} + +func (o *PageBlobGetPageRangesOptions) format() (*pageBlobClientGetPageRangesOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return &pageBlobClientGetPageRangesOptions{ + Marker: o.Marker, + Maxresults: o.MaxResults, + Range: o.PageRange.format(), + Snapshot: o.Snapshot, + }, leaseAccessConditions, modifiedAccessConditions +} + +// PageBlobGetPageRangesPager provides operations for iterating over paged responses +type PageBlobGetPageRangesPager struct { + *pageBlobClientGetPageRangesPager +} + +func toPageBlobGetPageRangesPager(resp *pageBlobClientGetPageRangesPager) *PageBlobGetPageRangesPager { + return &PageBlobGetPageRangesPager{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// PageBlobGetPageRangesDiffOptions provides set of configurations for PageBlobClient.GetPageRangesDiff operation +type PageBlobGetPageRangesDiffOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + MaxResults *int32 + // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot + // of the target blob. The response will only contain pages that were changed + // between the target blob and its previous snapshot. 
+ PrevSnapshotURL *string + // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response + // will contain only pages that were changed between target blob and previous + // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot + // specified by prevsnapshot is the older of the two. Note that incremental + // snapshots are currently supported only for blobs created on or after January 1, 2016. + PrevSnapshot *string + // Optional, you can specify whether a particular range of the blob is read + PageRange *HttpRange + + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + + BlobAccessConditions *BlobAccessConditions +} + +func (o *PageBlobGetPageRangesDiffOptions) format() (*pageBlobClientGetPageRangesDiffOptions, *LeaseAccessConditions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return &pageBlobClientGetPageRangesDiffOptions{ + Marker: o.Marker, + Maxresults: o.MaxResults, + PrevSnapshotURL: o.PrevSnapshotURL, + Prevsnapshot: o.PrevSnapshot, + Range: o.PageRange.format(), + Snapshot: o.Snapshot, + }, leaseAccessConditions, modifiedAccessConditions + +} + +// PageBlobGetPageRangesDiffPager provides operations for iterating over paged responses +type PageBlobGetPageRangesDiffPager struct { + *pageBlobClientGetPageRangesDiffPager +} + +func toPageBlobGetPageRangesDiffPager(resp *pageBlobClientGetPageRangesDiffPager) *PageBlobGetPageRangesDiffPager { + return &PageBlobGetPageRangesDiffPager{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// PageBlobResizeOptions provides set of configurations for PageBlobClient.Resize operation +type PageBlobResizeOptions struct { + CpkInfo *CpkInfo + CpkScopeInfo *CpkScopeInfo + BlobAccessConditions *BlobAccessConditions +} + +func (o *PageBlobResizeOptions) format() (*pageBlobClientResizeOptions, *LeaseAccessConditions, *CpkInfo, *CpkScopeInfo, *ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil, nil + } + + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return nil, leaseAccessConditions, o.CpkInfo, o.CpkScopeInfo, modifiedAccessConditions +} + +// PageBlobResizeResponse contains the response from method PageBlobClient.Resize +type PageBlobResizeResponse struct { + pageBlobClientResizeResponse +} + +func toPageBlobResizeResponse(resp pageBlobClientResizeResponse) PageBlobResizeResponse { + return PageBlobResizeResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// PageBlobUpdateSequenceNumberOptions provides set of configurations for PageBlobClient.UpdateSequenceNumber operation +type PageBlobUpdateSequenceNumberOptions struct { + ActionType *SequenceNumberActionType + + BlobSequenceNumber *int64 + + BlobAccessConditions *BlobAccessConditions +} + +func (o *PageBlobUpdateSequenceNumberOptions) format() (*SequenceNumberActionType, *pageBlobClientUpdateSequenceNumberOptions, *LeaseAccessConditions, 
*ModifiedAccessConditions) { + if o == nil { + return nil, nil, nil, nil + } + + options := &pageBlobClientUpdateSequenceNumberOptions{ + BlobSequenceNumber: o.BlobSequenceNumber, + } + + if *o.ActionType == SequenceNumberActionTypeIncrement { + options.BlobSequenceNumber = nil + } + + leaseAccessConditions, modifiedAccessConditions := o.BlobAccessConditions.format() + return o.ActionType, options, leaseAccessConditions, modifiedAccessConditions +} + +// PageBlobUpdateSequenceNumberResponse contains the response from method PageBlobClient.UpdateSequenceNumber +type PageBlobUpdateSequenceNumberResponse struct { + pageBlobClientUpdateSequenceNumberResponse +} + +func toPageBlobUpdateSequenceNumberResponse(resp pageBlobClientUpdateSequenceNumberResponse) PageBlobUpdateSequenceNumberResponse { + return PageBlobUpdateSequenceNumberResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// PageBlobCopyIncrementalOptions provides set of configurations for PageBlobClient.StartCopyIncremental operation +type PageBlobCopyIncrementalOptions struct { + ModifiedAccessConditions *ModifiedAccessConditions +} + +func (o *PageBlobCopyIncrementalOptions) format() (*pageBlobClientCopyIncrementalOptions, *ModifiedAccessConditions) { + if o == nil { + return nil, nil + } + + return nil, o.ModifiedAccessConditions +} + +// PageBlobCopyIncrementalResponse contains the response from method PageBlobClient.StartCopyIncremental +type PageBlobCopyIncrementalResponse struct { + pageBlobClientCopyIncrementalResponse +} + +func toPageBlobCopyIncrementalResponse(resp pageBlobClientCopyIncrementalResponse) PageBlobCopyIncrementalResponse { + return PageBlobCopyIncrementalResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_serialize_and_desearilize_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_serialize_and_desearilize_util.go new file mode 100644 index 000000000..3cf85ca43 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_serialize_and_desearilize_util.go @@ -0,0 +1,68 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +import ( + "net/url" + "strings" +) + +func serializeBlobTagsToStrPtr(tagsMap map[string]string) *string { + if tagsMap == nil { + return nil + } + tags := make([]string, 0) + for key, val := range tagsMap { + tags = append(tags, url.QueryEscape(key)+"="+url.QueryEscape(val)) + } + //tags = tags[:len(tags)-1] + blobTagsString := strings.Join(tags, "&") + return &blobTagsString +} + +func serializeBlobTags(tagsMap map[string]string) *BlobTags { + if tagsMap == nil { + return nil + } + blobTagSet := make([]*BlobTag, 0) + for key, val := range tagsMap { + newKey, newVal := key, val + blobTagSet = append(blobTagSet, &BlobTag{Key: &newKey, Value: &newVal}) + } + return &BlobTags{BlobTagSet: blobTagSet} +} + +func deserializeORSPolicies(policies map[string]string) (objectReplicationPolicies []ObjectReplicationPolicy) { + if policies == nil { + return nil + } + // For source blobs (blobs that have policy ids and rule ids applied to them), + // the header will be formatted as "x-ms-or-_: {Complete, Failed}". + // The value of this header is the status of the replication. 
+ orPolicyStatusHeader := make(map[string]string) + for key, value := range policies { + if strings.Contains(key, "or-") && key != "x-ms-or-policy-id" { + orPolicyStatusHeader[key] = value + } + } + + parsedResult := make(map[string][]ObjectReplicationRules) + for key, value := range orPolicyStatusHeader { + policyAndRuleIDs := strings.Split(strings.Split(key, "or-")[1], "_") + policyId, ruleId := policyAndRuleIDs[0], policyAndRuleIDs[1] + + parsedResult[policyId] = append(parsedResult[policyId], ObjectReplicationRules{RuleId: ruleId, Status: value}) + } + + for policyId, rules := range parsedResult { + objectReplicationPolicies = append(objectReplicationPolicies, ObjectReplicationPolicy{ + PolicyId: &policyId, + Rules: &rules, + }) + } + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_service_client_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_service_client_util.go new file mode 100644 index 000000000..747a94ee2 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zm_service_client_util.go @@ -0,0 +1,226 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azblob + +// --------------------------------------------------------------------------------------------------------------------- + +// ServiceGetAccountInfoOptions provides set of options for ServiceClient.GetAccountInfo +type ServiceGetAccountInfoOptions struct { + // placeholder for future options +} + +func (o *ServiceGetAccountInfoOptions) format() *serviceClientGetAccountInfoOptions { + return nil +} + +// ServiceGetAccountInfoResponse contains the response from ServiceClient.GetAccountInfo +type ServiceGetAccountInfoResponse struct { + serviceClientGetAccountInfoResponse +} + +func toServiceGetAccountInfoResponse(resp serviceClientGetAccountInfoResponse) ServiceGetAccountInfoResponse { + return ServiceGetAccountInfoResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ListContainersDetail indicates what additional information the service should return with each container. +type ListContainersDetail struct { + // Tells the service whether to return metadata for each container. + Metadata bool + + // Tells the service whether to return soft-deleted containers. + Deleted bool +} + +// string produces the `Include` query parameter's value. +func (o *ListContainersDetail) format() []ListContainersIncludeType { + if !o.Metadata && !o.Deleted { + return nil + } + + items := make([]ListContainersIncludeType, 0, 2) + // NOTE: Multiple strings MUST be appended in alphabetic order or signing the string for authentication fails! + if o.Deleted { + items = append(items, ListContainersIncludeTypeDeleted) + } + if o.Metadata { + items = append(items, ListContainersIncludeTypeMetadata) + } + return items +} + +// ListContainersOptions provides set of configurations for ListContainers operation +type ListContainersOptions struct { + Include ListContainersDetail + + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing operation did not return all containers + // remaining to be listed with the current page. 
The NextMarker value can be used as the value for the marker parameter in + // a subsequent call to request the next page of list items. The marker value is opaque to the client. + Marker *string + + // Specifies the maximum number of containers to return. If the request does not specify max results, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, + // then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible + // that the service will return fewer results than specified by max results, or than the default of 5000. + MaxResults *int32 + + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string +} + +func (o *ListContainersOptions) format() *serviceClientListContainersSegmentOptions { + if o == nil { + return nil + } + + return &serviceClientListContainersSegmentOptions{ + Include: o.Include.format(), + Marker: o.Marker, + Maxresults: o.MaxResults, + Prefix: o.Prefix, + } +} + +// ServiceListContainersSegmentPager provides operations for iterating over paged responses. +type ServiceListContainersSegmentPager struct { + serviceClientListContainersSegmentPager +} + +func toServiceListContainersSegmentPager(resp serviceClientListContainersSegmentPager) *ServiceListContainersSegmentPager { + return &ServiceListContainersSegmentPager{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ServiceGetPropertiesOptions provides set of options for ServiceClient.GetProperties +type ServiceGetPropertiesOptions struct { + // placeholder for future options +} + +func (o *ServiceGetPropertiesOptions) format() *serviceClientGetPropertiesOptions { + return nil +} + +// ServiceGetPropertiesResponse contains the response from ServiceClient.GetProperties +type ServiceGetPropertiesResponse struct { + serviceClientGetPropertiesResponse +} + +func toServiceGetPropertiesResponse(resp serviceClientGetPropertiesResponse) ServiceGetPropertiesResponse { + return ServiceGetPropertiesResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ServiceSetPropertiesOptions provides set of options for ServiceClient.SetProperties +type ServiceSetPropertiesOptions struct { + // The set of CORS rules. + Cors []*CorsRule + + // The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible + // values include version 2008-10-27 and all more recent versions + DefaultServiceVersion *string + + // the retention policy which determines how long the associated data should persist + DeleteRetentionPolicy *RetentionPolicy + + // a summary of request statistics grouped by API in hour or minute aggregates for blobs + HourMetrics *Metrics + + // Azure Analytics Logging settings. 
+ Logging *Logging + + // a summary of request statistics grouped by API in hour or minute aggregates for blobs + MinuteMetrics *Metrics + + // The properties that enable an account to host a static website + StaticWebsite *StaticWebsite +} + +func (o *ServiceSetPropertiesOptions) format() (StorageServiceProperties, *serviceClientSetPropertiesOptions) { + if o == nil { + return StorageServiceProperties{}, nil + } + + return StorageServiceProperties{ + Cors: o.Cors, + DefaultServiceVersion: o.DefaultServiceVersion, + DeleteRetentionPolicy: o.DeleteRetentionPolicy, + HourMetrics: o.HourMetrics, + Logging: o.Logging, + MinuteMetrics: o.MinuteMetrics, + StaticWebsite: o.StaticWebsite, + }, nil +} + +// ServiceSetPropertiesResponse contains the response from ServiceClient.SetProperties +type ServiceSetPropertiesResponse struct { + serviceClientSetPropertiesResponse +} + +func toServiceSetPropertiesResponse(resp serviceClientSetPropertiesResponse) ServiceSetPropertiesResponse { + return ServiceSetPropertiesResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ServiceGetStatisticsOptions provides set of options for ServiceClient.GetStatistics +type ServiceGetStatisticsOptions struct { + // placeholder for future options +} + +func (o *ServiceGetStatisticsOptions) format() *serviceClientGetStatisticsOptions { + return nil +} + +// ServiceGetStatisticsResponse contains the response from ServiceClient.GetStatistics. +type ServiceGetStatisticsResponse struct { + serviceClientGetStatisticsResponse +} + +func toServiceGetStatisticsResponse(resp serviceClientGetStatisticsResponse) ServiceGetStatisticsResponse { + return ServiceGetStatisticsResponse{resp} +} + +// --------------------------------------------------------------------------------------------------------------------- + +// ServiceFilterBlobsOptions provides set of configurations for ServiceClient.FindBlobsByTags +type ServiceFilterBlobsOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The operation returns the NextMarker + // value within the response body if the listing operation did not return all containers remaining to be listed with the current page. The NextMarker value + // can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server + // will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for + // retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or + // than the default of 5000. + MaxResults *int32 + // Filters the results to return only to return only blobs whose tags match the specified expression. 
+ Where *string +} + +func (o *ServiceFilterBlobsOptions) pointer() *serviceClientFilterBlobsOptions { + if o == nil { + return nil + } + return &serviceClientFilterBlobsOptions{ + Marker: o.Marker, + Maxresults: o.MaxResults, + Where: o.Where, + } +} + +// ServiceFilterBlobsResponse contains the response from ServiceClient.FindBlobsByTags +type ServiceFilterBlobsResponse struct { + serviceClientFilterBlobsResponse +} + +func toServiceFilterBlobsResponse(resp serviceClientFilterBlobsResponse) ServiceFilterBlobsResponse { + return ServiceFilterBlobsResponse{resp} +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_appendblob_client.go new file mode 100644 index 000000000..ca5aac8cd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_appendblob_client.go @@ -0,0 +1,648 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "time" +) + +type appendBlobClient struct { + endpoint string + pl runtime.Pipeline +} + +// newAppendBlobClient creates a new instance of appendBlobClient with the specified values. +// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// pl - the pipeline used for sending requests and handling responses. +func newAppendBlobClient(endpoint string, pl runtime.Pipeline) *appendBlobClient { + client := &appendBlobClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// AppendBlock - The Append Block operation commits a new block of data to the end of an existing append blob. The Append +// Block operation is permitted only if the blob was created with x-ms-blob-type set to +// AppendBlob. Append Block is supported only on version 2015-02-21 version or later. +// If the operation fails it returns an *azcore.ResponseError type. +// contentLength - The length of the request. +// body - Initial data +// appendBlobClientAppendBlockOptions - appendBlobClientAppendBlockOptions contains the optional parameters for the appendBlobClient.AppendBlock +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the appendBlobClient.AppendBlock +// method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
+func (client *appendBlobClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeekCloser, appendBlobClientAppendBlockOptions *appendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (appendBlobClientAppendBlockResponse, error) { + req, err := client.appendBlockCreateRequest(ctx, contentLength, body, appendBlobClientAppendBlockOptions, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return appendBlobClientAppendBlockResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return appendBlobClientAppendBlockResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return appendBlobClientAppendBlockResponse{}, runtime.NewResponseError(resp) + } + return client.appendBlockHandleResponse(resp) +} + +// appendBlockCreateRequest creates the AppendBlock request. +func (client *appendBlobClient) appendBlockCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, appendBlobClientAppendBlockOptions *appendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "appendblock") + if appendBlobClientAppendBlockOptions != nil && appendBlobClientAppendBlockOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobClientAppendBlockOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if appendBlobClientAppendBlockOptions != nil && appendBlobClientAppendBlockOptions.TransactionalContentMD5 != nil { + req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockOptions.TransactionalContentMD5)) + } + if appendBlobClientAppendBlockOptions != nil && appendBlobClientAppendBlockOptions.TransactionalContentCRC64 != nil { + req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockOptions.TransactionalContentCRC64)) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { + req.Raw().Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)) + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", 
string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if appendBlobClientAppendBlockOptions != nil && appendBlobClientAppendBlockOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *appendBlobClientAppendBlockOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, req.SetBody(body, "application/octet-stream") +} + +// appendBlockHandleResponse handles the AppendBlock response. +func (client *appendBlobClient) appendBlockHandleResponse(resp *http.Response) (appendBlobClientAppendBlockResponse, error) { + result := appendBlobClientAppendBlockResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return appendBlobClientAppendBlockResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return appendBlobClientAppendBlockResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return appendBlobClientAppendBlockResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return appendBlobClientAppendBlockResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return appendBlobClientAppendBlockResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + 
return appendBlobClientAppendBlockResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// AppendBlockFromURL - The Append Block operation commits a new block of data to the end of an existing append blob where +// the contents are read from a source url. The Append Block operation is permitted only if the blob was +// created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. +// If the operation fails it returns an *azcore.ResponseError type. +// sourceURL - Specify a URL to the copy source. +// contentLength - The length of the request. +// appendBlobClientAppendBlockFromURLOptions - appendBlobClientAppendBlockFromURLOptions contains the optional parameters +// for the appendBlobClient.AppendBlockFromURL method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the appendBlobClient.AppendBlock +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL +// method. +func (client *appendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, appendBlobClientAppendBlockFromURLOptions *appendBlobClientAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (appendBlobClientAppendBlockFromURLResponse, error) { + req, err := client.appendBlockFromURLCreateRequest(ctx, sourceURL, contentLength, appendBlobClientAppendBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return appendBlobClientAppendBlockFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return appendBlobClientAppendBlockFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return appendBlobClientAppendBlockFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.appendBlockFromURLHandleResponse(resp) +} + +// appendBlockFromURLCreateRequest creates the AppendBlockFromURL request. 
+func (client *appendBlobClient) appendBlockFromURLCreateRequest(ctx context.Context, sourceURL string, contentLength int64, appendBlobClientAppendBlockFromURLOptions *appendBlobClientAppendBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "appendblock") + if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobClientAppendBlockFromURLOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-copy-source", sourceURL) + if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.SourceRange != nil { + req.Raw().Header.Set("x-ms-source-range", *appendBlobClientAppendBlockFromURLOptions.SourceRange) + } + if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.SourceContentMD5 != nil { + req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockFromURLOptions.SourceContentMD5)) + } + if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.SourceContentcrc64 != nil { + req.Raw().Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockFromURLOptions.SourceContentcrc64)) + } + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.TransactionalContentMD5 != nil { + req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(appendBlobClientAppendBlockFromURLOptions.TransactionalContentMD5)) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil { + req.Raw().Header.Set("x-ms-blob-condition-maxsize", strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)) + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != 
nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *appendBlobClientAppendBlockFromURLOptions.RequestID) + } + if appendBlobClientAppendBlockFromURLOptions != nil && appendBlobClientAppendBlockFromURLOptions.CopySourceAuthorization != nil { + req.Raw().Header.Set("x-ms-copy-source-authorization", *appendBlobClientAppendBlockFromURLOptions.CopySourceAuthorization) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// appendBlockFromURLHandleResponse handles the AppendBlockFromURL response. 
+func (client *appendBlobClient) appendBlockFromURLHandleResponse(resp *http.Response) (appendBlobClientAppendBlockFromURLResponse, error) { + result := appendBlobClientAppendBlockFromURLResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return appendBlobClientAppendBlockFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return appendBlobClientAppendBlockFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return appendBlobClientAppendBlockFromURLResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return appendBlobClientAppendBlockFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" { + result.BlobAppendOffset = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return appendBlobClientAppendBlockFromURLResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return appendBlobClientAppendBlockFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + return result, nil +} + +// Create - The Create Append Blob operation creates a new append blob. +// If the operation fails it returns an *azcore.ResponseError type. +// contentLength - The length of the request. +// appendBlobClientCreateOptions - appendBlobClientCreateOptions contains the optional parameters for the appendBlobClient.Create +// method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
+func (client *appendBlobClient) Create(ctx context.Context, contentLength int64, appendBlobClientCreateOptions *appendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (appendBlobClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, contentLength, appendBlobClientCreateOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return appendBlobClientCreateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return appendBlobClientCreateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return appendBlobClientCreateResponse{}, runtime.NewResponseError(resp) + } + return client.createHandleResponse(resp) +} + +// createCreateRequest creates the Create request. +func (client *appendBlobClient) createCreateRequest(ctx context.Context, contentLength int64, appendBlobClientCreateOptions *appendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobClientCreateOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-blob-type", "AppendBlob") + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl) + } + if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.Metadata != nil { + for k, v := range appendBlobClientCreateOptions.Metadata { + req.Raw().Header.Set("x-ms-meta-"+k, v) + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + 
req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *appendBlobClientCreateOptions.RequestID) + } + if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.BlobTagsString != nil { + req.Raw().Header.Set("x-ms-tags", *appendBlobClientCreateOptions.BlobTagsString) + } + if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.ImmutabilityPolicyExpiry != nil { + req.Raw().Header.Set("x-ms-immutability-policy-until-date", appendBlobClientCreateOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) + } + if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.ImmutabilityPolicyMode != nil { + req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*appendBlobClientCreateOptions.ImmutabilityPolicyMode)) + } + if appendBlobClientCreateOptions != nil && appendBlobClientCreateOptions.LegalHold != nil { + req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*appendBlobClientCreateOptions.LegalHold)) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// createHandleResponse handles the Create response. 
+func (client *appendBlobClient) createHandleResponse(resp *http.Response) (appendBlobClientCreateResponse, error) { + result := appendBlobClientCreateResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return appendBlobClientCreateResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return appendBlobClientCreateResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return appendBlobClientCreateResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return appendBlobClientCreateResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// Seal - The Seal operation seals the Append Blob to make it read-only. Seal is supported only on version 2019-12-12 version +// or later. +// If the operation fails it returns an *azcore.ResponseError type. +// appendBlobClientSealOptions - appendBlobClientSealOptions contains the optional parameters for the appendBlobClient.Seal +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +// AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the appendBlobClient.AppendBlock +// method. +func (client *appendBlobClient) Seal(ctx context.Context, appendBlobClientSealOptions *appendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (appendBlobClientSealResponse, error) { + req, err := client.sealCreateRequest(ctx, appendBlobClientSealOptions, leaseAccessConditions, modifiedAccessConditions, appendPositionAccessConditions) + if err != nil { + return appendBlobClientSealResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return appendBlobClientSealResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return appendBlobClientSealResponse{}, runtime.NewResponseError(resp) + } + return client.sealHandleResponse(resp) +} + +// sealCreateRequest creates the Seal request. 
+func (client *appendBlobClient) sealCreateRequest(ctx context.Context, appendBlobClientSealOptions *appendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "seal") + if appendBlobClientSealOptions != nil && appendBlobClientSealOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*appendBlobClientSealOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if appendBlobClientSealOptions != nil && appendBlobClientSealOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *appendBlobClientSealOptions.RequestID) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil { + req.Raw().Header.Set("x-ms-blob-condition-appendpos", strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// sealHandleResponse handles the Seal response. 
+func (client *appendBlobClient) sealHandleResponse(resp *http.Response) (appendBlobClientSealResponse, error) { + result := appendBlobClientSealResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return appendBlobClientSealResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return appendBlobClientSealResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { + isSealed, err := strconv.ParseBool(val) + if err != nil { + return appendBlobClientSealResponse{}, err + } + result.IsSealed = &isSealed + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blob_client.go new file mode 100644 index 000000000..607c6a714 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blob_client.go @@ -0,0 +1,2831 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "strconv" + "strings" + "time" +) + +type blobClient struct { + endpoint string + pl runtime.Pipeline +} + +// newBlobClient creates a new instance of blobClient with the specified values. +// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// pl - the pipeline used for sending requests and handling responses. +func newBlobClient(endpoint string, pl runtime.Pipeline) *blobClient { + client := &blobClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// AbortCopyFromURL - The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination +// blob with zero length and full metadata. +// If the operation fails it returns an *azcore.ResponseError type. +// copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. +// blobClientAbortCopyFromURLOptions - blobClientAbortCopyFromURLOptions contains the optional parameters for the blobClient.AbortCopyFromURL +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. 
+func (client *blobClient) AbortCopyFromURL(ctx context.Context, copyID string, blobClientAbortCopyFromURLOptions *blobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (blobClientAbortCopyFromURLResponse, error) { + req, err := client.abortCopyFromURLCreateRequest(ctx, copyID, blobClientAbortCopyFromURLOptions, leaseAccessConditions) + if err != nil { + return blobClientAbortCopyFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientAbortCopyFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return blobClientAbortCopyFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.abortCopyFromURLHandleResponse(resp) +} + +// abortCopyFromURLCreateRequest creates the AbortCopyFromURL request. +func (client *blobClient) abortCopyFromURLCreateRequest(ctx context.Context, copyID string, blobClientAbortCopyFromURLOptions *blobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "copy") + reqQP.Set("copyid", copyID) + if blobClientAbortCopyFromURLOptions != nil && blobClientAbortCopyFromURLOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientAbortCopyFromURLOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-copy-action", "abort") + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientAbortCopyFromURLOptions != nil && blobClientAbortCopyFromURLOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientAbortCopyFromURLOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// abortCopyFromURLHandleResponse handles the AbortCopyFromURL response. +func (client *blobClient) abortCopyFromURLHandleResponse(resp *http.Response) (blobClientAbortCopyFromURLResponse, error) { + result := blobClientAbortCopyFromURLResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientAbortCopyFromURLResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// AcquireLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// blobClientAcquireLeaseOptions - blobClientAcquireLeaseOptions contains the optional parameters for the blobClient.AcquireLease +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
+func (client *blobClient) AcquireLease(ctx context.Context, blobClientAcquireLeaseOptions *blobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientAcquireLeaseResponse, error) { + req, err := client.acquireLeaseCreateRequest(ctx, blobClientAcquireLeaseOptions, modifiedAccessConditions) + if err != nil { + return blobClientAcquireLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientAcquireLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return blobClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.acquireLeaseHandleResponse(resp) +} + +// acquireLeaseCreateRequest creates the AcquireLease request. +func (client *blobClient) acquireLeaseCreateRequest(ctx context.Context, blobClientAcquireLeaseOptions *blobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if blobClientAcquireLeaseOptions != nil && blobClientAcquireLeaseOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientAcquireLeaseOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-lease-action", "acquire") + if blobClientAcquireLeaseOptions != nil && blobClientAcquireLeaseOptions.Duration != nil { + req.Raw().Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*blobClientAcquireLeaseOptions.Duration), 10)) + } + if blobClientAcquireLeaseOptions != nil && blobClientAcquireLeaseOptions.ProposedLeaseID != nil { + req.Raw().Header.Set("x-ms-proposed-lease-id", *blobClientAcquireLeaseOptions.ProposedLeaseID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientAcquireLeaseOptions != nil && blobClientAcquireLeaseOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientAcquireLeaseOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// acquireLeaseHandleResponse handles the AcquireLease response. 
+func (client *blobClient) acquireLeaseHandleResponse(resp *http.Response) (blobClientAcquireLeaseResponse, error) { + result := blobClientAcquireLeaseResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientAcquireLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientAcquireLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// BreakLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// blobClientBreakLeaseOptions - blobClientBreakLeaseOptions contains the optional parameters for the blobClient.BreakLease +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *blobClient) BreakLease(ctx context.Context, blobClientBreakLeaseOptions *blobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientBreakLeaseResponse, error) { + req, err := client.breakLeaseCreateRequest(ctx, blobClientBreakLeaseOptions, modifiedAccessConditions) + if err != nil { + return blobClientBreakLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientBreakLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return blobClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.breakLeaseHandleResponse(resp) +} + +// breakLeaseCreateRequest creates the BreakLease request. 
+func (client *blobClient) breakLeaseCreateRequest(ctx context.Context, blobClientBreakLeaseOptions *blobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if blobClientBreakLeaseOptions != nil && blobClientBreakLeaseOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientBreakLeaseOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-lease-action", "break") + if blobClientBreakLeaseOptions != nil && blobClientBreakLeaseOptions.BreakPeriod != nil { + req.Raw().Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*blobClientBreakLeaseOptions.BreakPeriod), 10)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientBreakLeaseOptions != nil && blobClientBreakLeaseOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientBreakLeaseOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// breakLeaseHandleResponse handles the BreakLease response. +func (client *blobClient) breakLeaseHandleResponse(resp *http.Response) (blobClientBreakLeaseResponse, error) { + result := blobClientBreakLeaseResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientBreakLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-time"); val != "" { + leaseTime32, err := strconv.ParseInt(val, 10, 32) + leaseTime := int32(leaseTime32) + if err != nil { + return blobClientBreakLeaseResponse{}, err + } + result.LeaseTime = &leaseTime + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientBreakLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// ChangeLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. 
+// leaseID - Specifies the current lease ID on the resource. +// proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed +// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID +// string formats. +// blobClientChangeLeaseOptions - blobClientChangeLeaseOptions contains the optional parameters for the blobClient.ChangeLease +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *blobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, blobClientChangeLeaseOptions *blobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientChangeLeaseResponse, error) { + req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, blobClientChangeLeaseOptions, modifiedAccessConditions) + if err != nil { + return blobClientChangeLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientChangeLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.changeLeaseHandleResponse(resp) +} + +// changeLeaseCreateRequest creates the ChangeLease request. +func (client *blobClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, blobClientChangeLeaseOptions *blobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if blobClientChangeLeaseOptions != nil && blobClientChangeLeaseOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientChangeLeaseOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-lease-action", "change") + req.Raw().Header.Set("x-ms-lease-id", leaseID) + req.Raw().Header.Set("x-ms-proposed-lease-id", proposedLeaseID) + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientChangeLeaseOptions != nil && blobClientChangeLeaseOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientChangeLeaseOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// changeLeaseHandleResponse handles the ChangeLease response. 
+func (client *blobClient) changeLeaseHandleResponse(resp *http.Response) (blobClientChangeLeaseResponse, error) { + result := blobClientChangeLeaseResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientChangeLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientChangeLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// CopyFromURL - The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a response +// until the copy is complete. +// If the operation fails it returns an *azcore.ResponseError type. +// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// blobClientCopyFromURLOptions - blobClientCopyFromURLOptions contains the optional parameters for the blobClient.CopyFromURL +// method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +func (client *blobClient) CopyFromURL(ctx context.Context, copySource string, blobClientCopyFromURLOptions *blobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientCopyFromURLResponse, error) { + req, err := client.copyFromURLCreateRequest(ctx, copySource, blobClientCopyFromURLOptions, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) + if err != nil { + return blobClientCopyFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientCopyFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return blobClientCopyFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.copyFromURLHandleResponse(resp) +} + +// copyFromURLCreateRequest creates the CopyFromURL request. 
+func (client *blobClient) copyFromURLCreateRequest(ctx context.Context, copySource string, blobClientCopyFromURLOptions *blobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientCopyFromURLOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-requires-sync", "true") + if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.Metadata != nil { + for k, v := range blobClientCopyFromURLOptions.Metadata { + req.Raw().Header.Set("x-ms-meta-"+k, v) + } + } + if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.Tier != nil { + req.Raw().Header.Set("x-ms-access-tier", string(*blobClientCopyFromURLOptions.Tier)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-copy-source", copySource) + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientCopyFromURLOptions.RequestID) + } + if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.SourceContentMD5 != nil { + req.Raw().Header.Set("x-ms-source-content-md5", 
base64.StdEncoding.EncodeToString(blobClientCopyFromURLOptions.SourceContentMD5)) + } + if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.BlobTagsString != nil { + req.Raw().Header.Set("x-ms-tags", *blobClientCopyFromURLOptions.BlobTagsString) + } + if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.ImmutabilityPolicyExpiry != nil { + req.Raw().Header.Set("x-ms-immutability-policy-until-date", blobClientCopyFromURLOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) + } + if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.ImmutabilityPolicyMode != nil { + req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blobClientCopyFromURLOptions.ImmutabilityPolicyMode)) + } + if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.LegalHold != nil { + req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*blobClientCopyFromURLOptions.LegalHold)) + } + if blobClientCopyFromURLOptions != nil && blobClientCopyFromURLOptions.CopySourceAuthorization != nil { + req.Raw().Header.Set("x-ms-copy-source-authorization", *blobClientCopyFromURLOptions.CopySourceAuthorization) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// copyFromURLHandleResponse handles the CopyFromURL response. +func (client *blobClient) copyFromURLHandleResponse(resp *http.Response) (blobClientCopyFromURLResponse, error) { + result := blobClientCopyFromURLResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientCopyFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientCopyFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = &val + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blobClientCopyFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blobClientCopyFromURLResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + return result, nil +} + +// CreateSnapshot - The Create Snapshot operation creates a read-only snapshot of a blob +// If the operation fails it returns an *azcore.ResponseError type. +// blobClientCreateSnapshotOptions - blobClientCreateSnapshotOptions contains the optional parameters for the blobClient.CreateSnapshot +// method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. 
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +func (client *blobClient) CreateSnapshot(ctx context.Context, blobClientCreateSnapshotOptions *blobClientCreateSnapshotOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientCreateSnapshotResponse, error) { + req, err := client.createSnapshotCreateRequest(ctx, blobClientCreateSnapshotOptions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, leaseAccessConditions) + if err != nil { + return blobClientCreateSnapshotResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientCreateSnapshotResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return blobClientCreateSnapshotResponse{}, runtime.NewResponseError(resp) + } + return client.createSnapshotHandleResponse(resp) +} + +// createSnapshotCreateRequest creates the CreateSnapshot request. +func (client *blobClient) createSnapshotCreateRequest(ctx context.Context, blobClientCreateSnapshotOptions *blobClientCreateSnapshotOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "snapshot") + if blobClientCreateSnapshotOptions != nil && blobClientCreateSnapshotOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientCreateSnapshotOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if blobClientCreateSnapshotOptions != nil && blobClientCreateSnapshotOptions.Metadata != nil { + for k, v := range blobClientCreateSnapshotOptions.Metadata { + req.Raw().Header.Set("x-ms-meta-"+k, v) + } + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", 
*modifiedAccessConditions.IfTags) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientCreateSnapshotOptions != nil && blobClientCreateSnapshotOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientCreateSnapshotOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// createSnapshotHandleResponse handles the CreateSnapshot response. +func (client *blobClient) createSnapshotHandleResponse(resp *http.Response) (blobClientCreateSnapshotResponse, error) { + result := blobClientCreateSnapshotResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-snapshot"); val != "" { + result.Snapshot = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientCreateSnapshotResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientCreateSnapshotResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return blobClientCreateSnapshotResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + return result, nil +} + +// Delete - If the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently removed +// from the storage account. If the storage account's soft delete feature is enabled, +// then, when a blob is deleted, it is marked for deletion and becomes inaccessible immediately. However, the blob service +// retains the blob or snapshot for the number of days specified by the +// DeleteRetentionPolicy section of Storage service properties [Set-Blob-Service-Properties.md]. After the specified number +// of days has passed, the blob's data is permanently removed from the storage +// account. Note that you continue to be charged for the soft-deleted blob's storage until it is permanently removed. Use +// the List Blobs API and specify the "include=deleted" query parameter to discover +// which blobs and snapshots have been soft deleted. You can then use the Undelete Blob API to restore a soft-deleted blob. +// All other operations on a soft-deleted blob or snapshot causes the service to +// return an HTTP status code of 404 (ResourceNotFound). +// If the operation fails it returns an *azcore.ResponseError type. +// blobClientDeleteOptions - blobClientDeleteOptions contains the optional parameters for the blobClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
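+// A minimal usage sketch (illustrative only; "client" and "ctx" are assumed to already
+// exist, and every optional parameter struct is left nil so the service defaults apply):
+//
+//	resp, err := client.Delete(ctx, nil, nil, nil)
+//	if err != nil {
+//		// on failure err is an *azcore.ResponseError, as documented above
+//	}
+//	_ = resp // blobClientDeleteResponse carries the ClientRequestID, RequestID, Version and Date headers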
+func (client *blobClient) Delete(ctx context.Context, blobClientDeleteOptions *blobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientDeleteResponse, error) { + req, err := client.deleteCreateRequest(ctx, blobClientDeleteOptions, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return blobClientDeleteResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return blobClientDeleteResponse{}, runtime.NewResponseError(resp) + } + return client.deleteHandleResponse(resp) +} + +// deleteCreateRequest creates the Delete request. +func (client *blobClient) deleteCreateRequest(ctx context.Context, blobClientDeleteOptions *blobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if blobClientDeleteOptions != nil && blobClientDeleteOptions.Snapshot != nil { + reqQP.Set("snapshot", *blobClientDeleteOptions.Snapshot) + } + if blobClientDeleteOptions != nil && blobClientDeleteOptions.VersionID != nil { + reqQP.Set("versionid", *blobClientDeleteOptions.VersionID) + } + if blobClientDeleteOptions != nil && blobClientDeleteOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientDeleteOptions.Timeout), 10)) + } + if blobClientDeleteOptions != nil && blobClientDeleteOptions.BlobDeleteType != nil { + reqQP.Set("deletetype", string(*blobClientDeleteOptions.BlobDeleteType)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if blobClientDeleteOptions != nil && blobClientDeleteOptions.DeleteSnapshots != nil { + req.Raw().Header.Set("x-ms-delete-snapshots", string(*blobClientDeleteOptions.DeleteSnapshots)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientDeleteOptions != nil && blobClientDeleteOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientDeleteOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// deleteHandleResponse handles the Delete response. 
+func (client *blobClient) deleteHandleResponse(resp *http.Response) (blobClientDeleteResponse, error) { + result := blobClientDeleteResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientDeleteResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// DeleteImmutabilityPolicy - The Delete Immutability Policy operation deletes the immutability policy on the blob +// If the operation fails it returns an *azcore.ResponseError type. +// options - blobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the blobClient.DeleteImmutabilityPolicy +// method. +func (client *blobClient) DeleteImmutabilityPolicy(ctx context.Context, options *blobClientDeleteImmutabilityPolicyOptions) (blobClientDeleteImmutabilityPolicyResponse, error) { + req, err := client.deleteImmutabilityPolicyCreateRequest(ctx, options) + if err != nil { + return blobClientDeleteImmutabilityPolicyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientDeleteImmutabilityPolicyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientDeleteImmutabilityPolicyResponse{}, runtime.NewResponseError(resp) + } + return client.deleteImmutabilityPolicyHandleResponse(resp) +} + +// deleteImmutabilityPolicyCreateRequest creates the DeleteImmutabilityPolicy request. +func (client *blobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Context, options *blobClientDeleteImmutabilityPolicyOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "immutabilityPolicies") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// deleteImmutabilityPolicyHandleResponse handles the DeleteImmutabilityPolicy response. +func (client *blobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Response) (blobClientDeleteImmutabilityPolicyResponse, error) { + result := blobClientDeleteImmutabilityPolicyResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientDeleteImmutabilityPolicyResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Download - The Download operation reads or downloads a blob from the system, including its metadata and properties. You +// can also call Download to read a snapshot. +// If the operation fails it returns an *azcore.ResponseError type. 
+// blobClientDownloadOptions - blobClientDownloadOptions contains the optional parameters for the blobClient.Download method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *blobClient) Download(ctx context.Context, blobClientDownloadOptions *blobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (blobClientDownloadResponse, error) { + req, err := client.downloadCreateRequest(ctx, blobClientDownloadOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions) + if err != nil { + return blobClientDownloadResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientDownloadResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent, http.StatusNotModified) { + return blobClientDownloadResponse{}, runtime.NewResponseError(resp) + } + return client.downloadHandleResponse(resp) +} + +// downloadCreateRequest creates the Download request. +func (client *blobClient) downloadCreateRequest(ctx context.Context, blobClientDownloadOptions *blobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if blobClientDownloadOptions != nil && blobClientDownloadOptions.Snapshot != nil { + reqQP.Set("snapshot", *blobClientDownloadOptions.Snapshot) + } + if blobClientDownloadOptions != nil && blobClientDownloadOptions.VersionID != nil { + reqQP.Set("versionid", *blobClientDownloadOptions.VersionID) + } + if blobClientDownloadOptions != nil && blobClientDownloadOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientDownloadOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + if blobClientDownloadOptions != nil && blobClientDownloadOptions.Range != nil { + req.Raw().Header.Set("x-ms-range", *blobClientDownloadOptions.Range) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if blobClientDownloadOptions != nil && blobClientDownloadOptions.RangeGetContentMD5 != nil { + req.Raw().Header.Set("x-ms-range-get-content-md5", strconv.FormatBool(*blobClientDownloadOptions.RangeGetContentMD5)) + } + if blobClientDownloadOptions != nil && blobClientDownloadOptions.RangeGetContentCRC64 != nil { + req.Raw().Header.Set("x-ms-range-get-content-crc64", strconv.FormatBool(*blobClientDownloadOptions.RangeGetContentCRC64)) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + 
req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientDownloadOptions != nil && blobClientDownloadOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientDownloadOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// downloadHandleResponse handles the Download response. +func (client *blobClient) downloadHandleResponse(resp *http.Response) (blobClientDownloadResponse, error) { + result := blobClientDownloadResponse{RawResponse: resp} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.LastModified = &lastModified + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = resp.Header.Get(hh) + } + } + if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { + result.ObjectReplicationPolicyID = &val + } + for hh := range resp.Header { + if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { + if result.Metadata == nil { + result.Metadata = map[string]string{} + } + result.Metadata[hh[len("x-ms-or-"):]] = resp.Header.Get(hh) + } + } + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.ContentLength = &contentLength + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("Content-Range"); val != "" { + result.ContentRange = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val + } + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := 
resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) + } + if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { + copyCompletionTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.CopyCompletionTime = ©CompletionTime + } + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-progress"); val != "" { + result.CopyProgress = &val + } + if val := resp.Header.Get("x-ms-copy-source"); val != "" { + result.CopySource = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("x-ms-is-current-version"); val != "" { + isCurrentVersion, err := strconv.ParseBool(val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.IsCurrentVersion = &isCurrentVersion + } + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { + blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.BlobContentMD5 = blobContentMD5 + } + if val := resp.Header.Get("x-ms-tag-count"); val != "" { + tagCount, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.TagCount = &tagCount + } + if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { + isSealed, err := strconv.ParseBool(val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.IsSealed = &isSealed + } + if val := resp.Header.Get("x-ms-last-access-time"); val != "" { + 
lastAccessed, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.LastAccessed = &lastAccessed + } + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn + } + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*BlobImmutabilityPolicyMode)(&val) + } + if val := resp.Header.Get("x-ms-legal-hold"); val != "" { + legalHold, err := strconv.ParseBool(val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.LegalHold = &legalHold + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blobClientDownloadResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + if val := resp.Header.Get("x-ms-error-code"); val != "" { + result.ErrorCode = &val + } + return result, nil +} + +// GetAccountInfo - Returns the sku name and account kind +// If the operation fails it returns an *azcore.ResponseError type. +// options - blobClientGetAccountInfoOptions contains the optional parameters for the blobClient.GetAccountInfo method. +func (client *blobClient) GetAccountInfo(ctx context.Context, options *blobClientGetAccountInfoOptions) (blobClientGetAccountInfoResponse, error) { + req, err := client.getAccountInfoCreateRequest(ctx, options) + if err != nil { + return blobClientGetAccountInfoResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientGetAccountInfoResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + } + return client.getAccountInfoHandleResponse(resp) +} + +// getAccountInfoCreateRequest creates the GetAccountInfo request. +func (client *blobClient) getAccountInfoCreateRequest(ctx context.Context, options *blobClientGetAccountInfoOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "account") + reqQP.Set("comp", "properties") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getAccountInfoHandleResponse handles the GetAccountInfo response. 
+func (client *blobClient) getAccountInfoHandleResponse(resp *http.Response) (blobClientGetAccountInfoResponse, error) { + result := blobClientGetAccountInfoResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientGetAccountInfoResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-sku-name"); val != "" { + result.SKUName = (*SKUName)(&val) + } + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } + return result, nil +} + +// GetProperties - The Get Properties operation returns all user-defined metadata, standard HTTP properties, and system properties +// for the blob. It does not return the content of the blob. +// If the operation fails it returns an *azcore.ResponseError type. +// blobClientGetPropertiesOptions - blobClientGetPropertiesOptions contains the optional parameters for the blobClient.GetProperties +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *blobClient) GetProperties(ctx context.Context, blobClientGetPropertiesOptions *blobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (blobClientGetPropertiesResponse, error) { + req, err := client.getPropertiesCreateRequest(ctx, blobClientGetPropertiesOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.getPropertiesHandleResponse(resp) +} + +// getPropertiesCreateRequest creates the GetProperties request. 
+func (client *blobClient) getPropertiesCreateRequest(ctx context.Context, blobClientGetPropertiesOptions *blobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodHead, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if blobClientGetPropertiesOptions != nil && blobClientGetPropertiesOptions.Snapshot != nil { + reqQP.Set("snapshot", *blobClientGetPropertiesOptions.Snapshot) + } + if blobClientGetPropertiesOptions != nil && blobClientGetPropertiesOptions.VersionID != nil { + reqQP.Set("versionid", *blobClientGetPropertiesOptions.VersionID) + } + if blobClientGetPropertiesOptions != nil && blobClientGetPropertiesOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientGetPropertiesOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientGetPropertiesOptions != nil && blobClientGetPropertiesOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientGetPropertiesOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. 
+func (client *blobClient) getPropertiesHandleResponse(resp *http.Response) (blobClientGetPropertiesResponse, error) { + result := blobClientGetPropertiesResponse{RawResponse: resp} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-creation-time"); val != "" { + creationTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.CreationTime = &creationTime + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = resp.Header.Get(hh) + } + } + if val := resp.Header.Get("x-ms-or-policy-id"); val != "" { + result.ObjectReplicationPolicyID = &val + } + for hh := range resp.Header { + if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") { + if result.Metadata == nil { + result.Metadata = map[string]string{} + } + result.Metadata[hh[len("x-ms-or-"):]] = resp.Header.Get(hh) + } + } + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) + } + if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { + copyCompletionTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.CopyCompletionTime = &copyCompletionTime + } + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-progress"); val != "" { + result.CopyProgress = &val + } + if val := resp.Header.Get("x-ms-copy-source"); val != "" { + result.CopySource = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("x-ms-incremental-copy"); val != "" { + isIncrementalCopy, err := strconv.ParseBool(val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.IsIncrementalCopy = &isIncrementalCopy + } + if val := resp.Header.Get("x-ms-copy-destination-snapshot"); val != "" { + result.DestinationSnapshot = &val + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.ContentLength = &contentLength + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val :=
resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val + } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-access-tier"); val != "" { + result.AccessTier = &val + } + if val := resp.Header.Get("x-ms-access-tier-inferred"); val != "" { + accessTierInferred, err := strconv.ParseBool(val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.AccessTierInferred = &accessTierInferred + } + if val := resp.Header.Get("x-ms-archive-status"); val != "" { + result.ArchiveStatus = &val + } + if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" { + accessTierChangeTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.AccessTierChangeTime = &accessTierChangeTime + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("x-ms-is-current-version"); val != "" { + isCurrentVersion, err := strconv.ParseBool(val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.IsCurrentVersion = &isCurrentVersion + } + if val := resp.Header.Get("x-ms-tag-count"); val != "" { + tagCount, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.TagCount = &tagCount + } + if val := resp.Header.Get("x-ms-expiry-time"); val != "" { + expiresOn, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.ExpiresOn = &expiresOn + } + if val := resp.Header.Get("x-ms-blob-sealed"); val != "" { + isSealed, err := strconv.ParseBool(val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.IsSealed = &isSealed + } + if val := 
resp.Header.Get("x-ms-rehydrate-priority"); val != "" { + result.RehydratePriority = &val + } + if val := resp.Header.Get("x-ms-last-access-time"); val != "" { + lastAccessed, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.LastAccessed = &lastAccessed + } + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn + } + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*BlobImmutabilityPolicyMode)(&val) + } + if val := resp.Header.Get("x-ms-legal-hold"); val != "" { + legalHold, err := strconv.ParseBool(val) + if err != nil { + return blobClientGetPropertiesResponse{}, err + } + result.LegalHold = &legalHold + } + return result, nil +} + +// GetTags - The Get Tags operation enables users to get the tags associated with a blob. +// If the operation fails it returns an *azcore.ResponseError type. +// blobClientGetTagsOptions - blobClientGetTagsOptions contains the optional parameters for the blobClient.GetTags method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +func (client *blobClient) GetTags(ctx context.Context, blobClientGetTagsOptions *blobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientGetTagsResponse, error) { + req, err := client.getTagsCreateRequest(ctx, blobClientGetTagsOptions, modifiedAccessConditions, leaseAccessConditions) + if err != nil { + return blobClientGetTagsResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientGetTagsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientGetTagsResponse{}, runtime.NewResponseError(resp) + } + return client.getTagsHandleResponse(resp) +} + +// getTagsCreateRequest creates the GetTags request. 
+func (client *blobClient) getTagsCreateRequest(ctx context.Context, blobClientGetTagsOptions *blobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "tags") + if blobClientGetTagsOptions != nil && blobClientGetTagsOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientGetTagsOptions.Timeout), 10)) + } + if blobClientGetTagsOptions != nil && blobClientGetTagsOptions.Snapshot != nil { + reqQP.Set("snapshot", *blobClientGetTagsOptions.Snapshot) + } + if blobClientGetTagsOptions != nil && blobClientGetTagsOptions.VersionID != nil { + reqQP.Set("versionid", *blobClientGetTagsOptions.VersionID) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientGetTagsOptions != nil && blobClientGetTagsOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientGetTagsOptions.RequestID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getTagsHandleResponse handles the GetTags response. +func (client *blobClient) getTagsHandleResponse(resp *http.Response) (blobClientGetTagsResponse, error) { + result := blobClientGetTagsResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientGetTagsResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.BlobTags); err != nil { + return blobClientGetTagsResponse{}, err + } + return result, nil +} + +// Query - The Query operation enables users to select/project on blob data by providing simple query expressions. +// If the operation fails it returns an *azcore.ResponseError type. +// blobClientQueryOptions - blobClientQueryOptions contains the optional parameters for the blobClient.Query method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
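+// A minimal usage sketch (illustrative only; "client", "ctx" and a prepared query request
+// "qr" are assumed to exist, and all conditional parameters are left nil):
+//
+//	opts := &blobClientQueryOptions{QueryRequest: qr}
+//	resp, err := client.Query(ctx, opts, nil, nil, nil)
+//	if err != nil {
+//		// on failure err is an *azcore.ResponseError, as documented above
+//	}
+//	// query results stream through resp.RawResponse.Body, since the request skips eager body download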
+func (client *blobClient) Query(ctx context.Context, blobClientQueryOptions *blobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (blobClientQueryResponse, error) { + req, err := client.queryCreateRequest(ctx, blobClientQueryOptions, leaseAccessConditions, cpkInfo, modifiedAccessConditions) + if err != nil { + return blobClientQueryResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientQueryResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent) { + return blobClientQueryResponse{}, runtime.NewResponseError(resp) + } + return client.queryHandleResponse(resp) +} + +// queryCreateRequest creates the Query request. +func (client *blobClient) queryCreateRequest(ctx context.Context, blobClientQueryOptions *blobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "query") + if blobClientQueryOptions != nil && blobClientQueryOptions.Snapshot != nil { + reqQP.Set("snapshot", *blobClientQueryOptions.Snapshot) + } + if blobClientQueryOptions != nil && blobClientQueryOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientQueryOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientQueryOptions != nil && blobClientQueryOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientQueryOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + if blobClientQueryOptions != nil && blobClientQueryOptions.QueryRequest != nil { + return req, runtime.MarshalAsXML(req, *blobClientQueryOptions.QueryRequest) + } + return req, nil +} + +// queryHandleResponse 
handles the Query response. +func (client *blobClient) queryHandleResponse(resp *http.Response) (blobClientQueryResponse, error) { + result := blobClientQueryResponse{RawResponse: resp} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientQueryResponse{}, err + } + result.LastModified = &lastModified + } + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = resp.Header.Get(hh) + } + } + if val := resp.Header.Get("Content-Length"); val != "" { + contentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return blobClientQueryResponse{}, err + } + result.ContentLength = &contentLength + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("Content-Range"); val != "" { + result.ContentRange = &val + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blobClientQueryResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("Content-Encoding"); val != "" { + result.ContentEncoding = &val + } + if val := resp.Header.Get("Cache-Control"); val != "" { + result.CacheControl = &val + } + if val := resp.Header.Get("Content-Disposition"); val != "" { + result.ContentDisposition = &val + } + if val := resp.Header.Get("Content-Language"); val != "" { + result.ContentLanguage = &val + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return blobClientQueryResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-blob-type"); val != "" { + result.BlobType = (*BlobType)(&val) + } + if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" { + copyCompletionTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientQueryResponse{}, err + } + result.CopyCompletionTime = &copyCompletionTime + } + if val := resp.Header.Get("x-ms-copy-status-description"); val != "" { + result.CopyStatusDescription = &val + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-progress"); val != "" { + result.CopyProgress = &val + } + if val := resp.Header.Get("x-ms-copy-source"); val != "" { + result.CopySource = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Accept-Ranges"); val != "" { + result.AcceptRanges = &val + } + if val
:= resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientQueryResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" { + blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32) + blobCommittedBlockCount := int32(blobCommittedBlockCount32) + if err != nil { + return blobClientQueryResponse{}, err + } + result.BlobCommittedBlockCount = &blobCommittedBlockCount + } + if val := resp.Header.Get("x-ms-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return blobClientQueryResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" { + blobContentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blobClientQueryResponse{}, err + } + result.BlobContentMD5 = blobContentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + contentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blobClientQueryResponse{}, err + } + result.ContentCRC64 = contentCRC64 + } + return result, nil +} + +// ReleaseLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// leaseID - Specifies the current lease ID on the resource. +// blobClientReleaseLeaseOptions - blobClientReleaseLeaseOptions contains the optional parameters for the blobClient.ReleaseLease +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *blobClient) ReleaseLease(ctx context.Context, leaseID string, blobClientReleaseLeaseOptions *blobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientReleaseLeaseResponse, error) { + req, err := client.releaseLeaseCreateRequest(ctx, leaseID, blobClientReleaseLeaseOptions, modifiedAccessConditions) + if err != nil { + return blobClientReleaseLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientReleaseLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.releaseLeaseHandleResponse(resp) +} + +// releaseLeaseCreateRequest creates the ReleaseLease request. 
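// Editor's note: a minimal, stdlib-only sketch of the response-header convention the
// generated *HandleResponse helpers above follow — optional headers become pointer
// fields (nil when absent), RFC1123 dates go through time.Parse, and base64-encoded
// headers such as Content-MD5 are decoded. The struct and helper below are
// illustrative only, not the SDK's types.
package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
	"time"
)

type exampleResult struct {
	LastModified *time.Time // nil when the header is absent
	ETag         *string
	ContentMD5   []byte
}

func parseExampleHeaders(resp *http.Response) (exampleResult, error) {
	var result exampleResult
	if val := resp.Header.Get("Last-Modified"); val != "" {
		lastModified, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return exampleResult{}, err
		}
		result.LastModified = &lastModified
	}
	if val := resp.Header.Get("ETag"); val != "" {
		result.ETag = &val
	}
	if val := resp.Header.Get("Content-MD5"); val != "" {
		md5sum, err := base64.StdEncoding.DecodeString(val)
		if err != nil {
			return exampleResult{}, err
		}
		result.ContentMD5 = md5sum
	}
	return result, nil
}

func main() {
	resp := &http.Response{Header: http.Header{}}
	resp.Header.Set("ETag", `"0x8D9"`)
	resp.Header.Set("Last-Modified", time.Now().UTC().Format(time.RFC1123))
	r, err := parseExampleHeaders(resp)
	fmt.Println(r.ETag != nil, r.LastModified != nil, r.ContentMD5 == nil, err)
}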
+func (client *blobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, blobClientReleaseLeaseOptions *blobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if blobClientReleaseLeaseOptions != nil && blobClientReleaseLeaseOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientReleaseLeaseOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-lease-action", "release") + req.Raw().Header.Set("x-ms-lease-id", leaseID) + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientReleaseLeaseOptions != nil && blobClientReleaseLeaseOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientReleaseLeaseOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// releaseLeaseHandleResponse handles the ReleaseLease response. +func (client *blobClient) releaseLeaseHandleResponse(resp *http.Response) (blobClientReleaseLeaseResponse, error) { + result := blobClientReleaseLeaseResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientReleaseLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientReleaseLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// RenewLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations +// If the operation fails it returns an *azcore.ResponseError type. +// leaseID - Specifies the current lease ID on the resource. +// blobClientRenewLeaseOptions - blobClientRenewLeaseOptions contains the optional parameters for the blobClient.RenewLease +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
+func (client *blobClient) RenewLease(ctx context.Context, leaseID string, blobClientRenewLeaseOptions *blobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientRenewLeaseResponse, error) { + req, err := client.renewLeaseCreateRequest(ctx, leaseID, blobClientRenewLeaseOptions, modifiedAccessConditions) + if err != nil { + return blobClientRenewLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientRenewLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientRenewLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.renewLeaseHandleResponse(resp) +} + +// renewLeaseCreateRequest creates the RenewLease request. +func (client *blobClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, blobClientRenewLeaseOptions *blobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + if blobClientRenewLeaseOptions != nil && blobClientRenewLeaseOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientRenewLeaseOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-lease-action", "renew") + req.Raw().Header.Set("x-ms-lease-id", leaseID) + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientRenewLeaseOptions != nil && blobClientRenewLeaseOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientRenewLeaseOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// renewLeaseHandleResponse handles the RenewLease response. 
+func (client *blobClient) renewLeaseHandleResponse(resp *http.Response) (blobClientRenewLeaseResponse, error) { + result := blobClientRenewLeaseResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientRenewLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientRenewLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetExpiry - Sets the time a blob will expire and be deleted. +// If the operation fails it returns an *azcore.ResponseError type. +// expiryOptions - Required. Indicates mode of the expiry time +// options - blobClientSetExpiryOptions contains the optional parameters for the blobClient.SetExpiry method. +func (client *blobClient) SetExpiry(ctx context.Context, expiryOptions BlobExpiryOptions, options *blobClientSetExpiryOptions) (blobClientSetExpiryResponse, error) { + req, err := client.setExpiryCreateRequest(ctx, expiryOptions, options) + if err != nil { + return blobClientSetExpiryResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientSetExpiryResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientSetExpiryResponse{}, runtime.NewResponseError(resp) + } + return client.setExpiryHandleResponse(resp) +} + +// setExpiryCreateRequest creates the SetExpiry request. +func (client *blobClient) setExpiryCreateRequest(ctx context.Context, expiryOptions BlobExpiryOptions, options *blobClientSetExpiryOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "expiry") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("x-ms-expiry-option", string(expiryOptions)) + if options != nil && options.ExpiresOn != nil { + req.Raw().Header.Set("x-ms-expiry-time", *options.ExpiresOn) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// setExpiryHandleResponse handles the SetExpiry response. 
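// Editor's note: every *CreateRequest helper in this file uses the same options
// pattern — optional parameters are pointer fields on an options struct, and each use
// is guarded with "options != nil && options.Field != nil" so both the struct and any
// field can be omitted. A small stdlib-only sketch of that pattern; the option struct
// and endpoint below are illustrative, not part of the SDK.
package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strconv"
)

type exampleOptions struct {
	Timeout   *int32  // seconds; added as the "timeout" query parameter when set
	RequestID *string // sent as x-ms-client-request-id when set
}

func buildExampleRequest(endpoint string, options *exampleOptions) (*http.Request, error) {
	req, err := http.NewRequest(http.MethodPut, endpoint, nil)
	if err != nil {
		return nil, err
	}
	reqQP := url.Values{}
	reqQP.Set("comp", "example")
	if options != nil && options.Timeout != nil {
		reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
	}
	req.URL.RawQuery = reqQP.Encode()
	if options != nil && options.RequestID != nil {
		req.Header.Set("x-ms-client-request-id", *options.RequestID)
	}
	req.Header.Set("Accept", "application/xml")
	return req, nil
}

func main() {
	timeout := int32(30)
	req, _ := buildExampleRequest("https://account.blob.core.windows.net/container/blob", &exampleOptions{Timeout: &timeout})
	fmt.Println(req.URL.String()) // timeout appears in the query string; RequestID was omitted
}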
+func (client *blobClient) setExpiryHandleResponse(resp *http.Response) (blobClientSetExpiryResponse, error) { + result := blobClientSetExpiryResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientSetExpiryResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientSetExpiryResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetHTTPHeaders - The Set HTTP Headers operation sets system properties on the blob +// If the operation fails it returns an *azcore.ResponseError type. +// blobClientSetHTTPHeadersOptions - blobClientSetHTTPHeadersOptions contains the optional parameters for the blobClient.SetHTTPHeaders +// method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *blobClient) SetHTTPHeaders(ctx context.Context, blobClientSetHTTPHeadersOptions *blobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientSetHTTPHeadersResponse, error) { + req, err := client.setHTTPHeadersCreateRequest(ctx, blobClientSetHTTPHeadersOptions, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return blobClientSetHTTPHeadersResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientSetHTTPHeadersResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientSetHTTPHeadersResponse{}, runtime.NewResponseError(resp) + } + return client.setHTTPHeadersHandleResponse(resp) +} + +// setHTTPHeadersCreateRequest creates the SetHTTPHeaders request. 
+func (client *blobClient) setHTTPHeadersCreateRequest(ctx context.Context, blobClientSetHTTPHeadersOptions *blobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "properties") + if blobClientSetHTTPHeadersOptions != nil && blobClientSetHTTPHeadersOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetHTTPHeadersOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientSetHTTPHeadersOptions != nil && blobClientSetHTTPHeadersOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetHTTPHeadersOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// setHTTPHeadersHandleResponse handles the SetHTTPHeaders response. 
+func (client *blobClient) setHTTPHeadersHandleResponse(resp *http.Response) (blobClientSetHTTPHeadersResponse, error) { + result := blobClientSetHTTPHeadersResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientSetHTTPHeadersResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return blobClientSetHTTPHeadersResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientSetHTTPHeadersResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetImmutabilityPolicy - The Set Immutability Policy operation sets the immutability policy on the blob +// If the operation fails it returns an *azcore.ResponseError type. +// blobClientSetImmutabilityPolicyOptions - blobClientSetImmutabilityPolicyOptions contains the optional parameters for the +// blobClient.SetImmutabilityPolicy method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *blobClient) SetImmutabilityPolicy(ctx context.Context, blobClientSetImmutabilityPolicyOptions *blobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientSetImmutabilityPolicyResponse, error) { + req, err := client.setImmutabilityPolicyCreateRequest(ctx, blobClientSetImmutabilityPolicyOptions, modifiedAccessConditions) + if err != nil { + return blobClientSetImmutabilityPolicyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientSetImmutabilityPolicyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientSetImmutabilityPolicyResponse{}, runtime.NewResponseError(resp) + } + return client.setImmutabilityPolicyHandleResponse(resp) +} + +// setImmutabilityPolicyCreateRequest creates the SetImmutabilityPolicy request. 
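// Editor's note: the conditional-access and policy headers written above
// (If-Modified-Since, If-Unmodified-Since, x-ms-immutability-policy-until-date) are
// all formatted with time.RFC1123, and the *HandleResponse helpers parse them back the
// same way. A tiny stdlib-only sketch of that round trip; the header name is just an
// example.
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	h := http.Header{}
	since := time.Date(2022, time.October, 6, 0, 10, 0, 0, time.UTC)
	h.Set("If-Unmodified-Since", since.Format(time.RFC1123)) // "Thu, 06 Oct 2022 00:10:00 UTC"
	parsed, err := time.Parse(time.RFC1123, h.Get("If-Unmodified-Since"))
	fmt.Println(parsed.Equal(since), err) // true <nil>
}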
+func (client *blobClient) setImmutabilityPolicyCreateRequest(ctx context.Context, blobClientSetImmutabilityPolicyOptions *blobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "immutabilityPolicies") + if blobClientSetImmutabilityPolicyOptions != nil && blobClientSetImmutabilityPolicyOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetImmutabilityPolicyOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientSetImmutabilityPolicyOptions != nil && blobClientSetImmutabilityPolicyOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetImmutabilityPolicyOptions.RequestID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if blobClientSetImmutabilityPolicyOptions != nil && blobClientSetImmutabilityPolicyOptions.ImmutabilityPolicyExpiry != nil { + req.Raw().Header.Set("x-ms-immutability-policy-until-date", blobClientSetImmutabilityPolicyOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) + } + if blobClientSetImmutabilityPolicyOptions != nil && blobClientSetImmutabilityPolicyOptions.ImmutabilityPolicyMode != nil { + req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blobClientSetImmutabilityPolicyOptions.ImmutabilityPolicyMode)) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// setImmutabilityPolicyHandleResponse handles the SetImmutabilityPolicy response. +func (client *blobClient) setImmutabilityPolicyHandleResponse(resp *http.Response) (blobClientSetImmutabilityPolicyResponse, error) { + result := blobClientSetImmutabilityPolicyResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientSetImmutabilityPolicyResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" { + immutabilityPolicyExpiry, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientSetImmutabilityPolicyResponse{}, err + } + result.ImmutabilityPolicyExpiry = &immutabilityPolicyExpiry + } + if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" { + result.ImmutabilityPolicyMode = (*BlobImmutabilityPolicyMode)(&val) + } + return result, nil +} + +// SetLegalHold - The Set Legal Hold operation sets a legal hold on the blob. +// If the operation fails it returns an *azcore.ResponseError type. +// legalHold - Specified if a legal hold should be set on the blob. +// options - blobClientSetLegalHoldOptions contains the optional parameters for the blobClient.SetLegalHold method. 
+func (client *blobClient) SetLegalHold(ctx context.Context, legalHold bool, options *blobClientSetLegalHoldOptions) (blobClientSetLegalHoldResponse, error) { + req, err := client.setLegalHoldCreateRequest(ctx, legalHold, options) + if err != nil { + return blobClientSetLegalHoldResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientSetLegalHoldResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientSetLegalHoldResponse{}, runtime.NewResponseError(resp) + } + return client.setLegalHoldHandleResponse(resp) +} + +// setLegalHoldCreateRequest creates the SetLegalHold request. +func (client *blobClient) setLegalHoldCreateRequest(ctx context.Context, legalHold bool, options *blobClientSetLegalHoldOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "legalhold") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(legalHold)) + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// setLegalHoldHandleResponse handles the SetLegalHold response. +func (client *blobClient) setLegalHoldHandleResponse(resp *http.Response) (blobClientSetLegalHoldResponse, error) { + result := blobClientSetLegalHoldResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientSetLegalHoldResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-legal-hold"); val != "" { + legalHold, err := strconv.ParseBool(val) + if err != nil { + return blobClientSetLegalHoldResponse{}, err + } + result.LegalHold = &legalHold + } + return result, nil +} + +// SetMetadata - The Set Blob Metadata operation sets user-defined metadata for the specified blob as one or more name-value +// pairs +// If the operation fails it returns an *azcore.ResponseError type. +// blobClientSetMetadataOptions - blobClientSetMetadataOptions contains the optional parameters for the blobClient.SetMetadata +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
+func (client *blobClient) SetMetadata(ctx context.Context, blobClientSetMetadataOptions *blobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (blobClientSetMetadataResponse, error) { + req, err := client.setMetadataCreateRequest(ctx, blobClientSetMetadataOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return blobClientSetMetadataResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientSetMetadataResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientSetMetadataResponse{}, runtime.NewResponseError(resp) + } + return client.setMetadataHandleResponse(resp) +} + +// setMetadataCreateRequest creates the SetMetadata request. +func (client *blobClient) setMetadataCreateRequest(ctx context.Context, blobClientSetMetadataOptions *blobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "metadata") + if blobClientSetMetadataOptions != nil && blobClientSetMetadataOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetMetadataOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if blobClientSetMetadataOptions != nil && blobClientSetMetadataOptions.Metadata != nil { + for k, v := range blobClientSetMetadataOptions.Metadata { + req.Raw().Header.Set("x-ms-meta-"+k, v) + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientSetMetadataOptions != nil && blobClientSetMetadataOptions.RequestID != nil { + 
req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetMetadataOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// setMetadataHandleResponse handles the SetMetadata response. +func (client *blobClient) setMetadataHandleResponse(resp *http.Response) (blobClientSetMetadataResponse, error) { + result := blobClientSetMetadataResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientSetMetadataResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientSetMetadataResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return blobClientSetMetadataResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// SetTags - The Set Tags operation enables users to set tags on a blob. +// If the operation fails it returns an *azcore.ResponseError type. +// blobClientSetTagsOptions - blobClientSetTagsOptions contains the optional parameters for the blobClient.SetTags method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +func (client *blobClient) SetTags(ctx context.Context, blobClientSetTagsOptions *blobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientSetTagsResponse, error) { + req, err := client.setTagsCreateRequest(ctx, blobClientSetTagsOptions, modifiedAccessConditions, leaseAccessConditions) + if err != nil { + return blobClientSetTagsResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientSetTagsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusNoContent) { + return blobClientSetTagsResponse{}, runtime.NewResponseError(resp) + } + return client.setTagsHandleResponse(resp) +} + +// setTagsCreateRequest creates the SetTags request. 
+func (client *blobClient) setTagsCreateRequest(ctx context.Context, blobClientSetTagsOptions *blobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "tags") + if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetTagsOptions.Timeout), 10)) + } + if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.VersionID != nil { + reqQP.Set("versionid", *blobClientSetTagsOptions.VersionID) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.TransactionalContentMD5 != nil { + req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blobClientSetTagsOptions.TransactionalContentMD5)) + } + if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.TransactionalContentCRC64 != nil { + req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(blobClientSetTagsOptions.TransactionalContentCRC64)) + } + if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetTagsOptions.RequestID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + req.Raw().Header.Set("Accept", "application/xml") + if blobClientSetTagsOptions != nil && blobClientSetTagsOptions.Tags != nil { + return req, runtime.MarshalAsXML(req, *blobClientSetTagsOptions.Tags) + } + return req, nil +} + +// setTagsHandleResponse handles the SetTags response. +func (client *blobClient) setTagsHandleResponse(resp *http.Response) (blobClientSetTagsResponse, error) { + result := blobClientSetTagsResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientSetTagsResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetTier - The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage +// account and on a block blob in a blob storage account (locally redundant storage only). A +// premium page blob's tier determines the allowed size, IOPS, and bandwidth of the blob. A block blob's tier determines Hot/Cool/Archive +// storage type. This operation does not update the blob's ETag. +// If the operation fails it returns an *azcore.ResponseError type. +// tier - Indicates the tier to be set on the blob. +// blobClientSetTierOptions - blobClientSetTierOptions contains the optional parameters for the blobClient.SetTier method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. 
+// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *blobClient) SetTier(ctx context.Context, tier AccessTier, blobClientSetTierOptions *blobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (blobClientSetTierResponse, error) { + req, err := client.setTierCreateRequest(ctx, tier, blobClientSetTierOptions, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return blobClientSetTierResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientSetTierResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) { + return blobClientSetTierResponse{}, runtime.NewResponseError(resp) + } + return client.setTierHandleResponse(resp) +} + +// setTierCreateRequest creates the SetTier request. +func (client *blobClient) setTierCreateRequest(ctx context.Context, tier AccessTier, blobClientSetTierOptions *blobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "tier") + if blobClientSetTierOptions != nil && blobClientSetTierOptions.Snapshot != nil { + reqQP.Set("snapshot", *blobClientSetTierOptions.Snapshot) + } + if blobClientSetTierOptions != nil && blobClientSetTierOptions.VersionID != nil { + reqQP.Set("versionid", *blobClientSetTierOptions.VersionID) + } + if blobClientSetTierOptions != nil && blobClientSetTierOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientSetTierOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-access-tier", string(tier)) + if blobClientSetTierOptions != nil && blobClientSetTierOptions.RehydratePriority != nil { + req.Raw().Header.Set("x-ms-rehydrate-priority", string(*blobClientSetTierOptions.RehydratePriority)) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientSetTierOptions != nil && blobClientSetTierOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientSetTierOptions.RequestID) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// setTierHandleResponse handles the SetTier response. +func (client *blobClient) setTierHandleResponse(resp *http.Response) (blobClientSetTierResponse, error) { + result := blobClientSetTierResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// StartCopyFromURL - The Start Copy From URL operation copies a blob or an internet resource to a new blob. +// If the operation fails it returns an *azcore.ResponseError type. +// copySource - Specifies the name of the source page blob snapshot. 
This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// blobClientStartCopyFromURLOptions - blobClientStartCopyFromURLOptions contains the optional parameters for the blobClient.StartCopyFromURL +// method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +func (client *blobClient) StartCopyFromURL(ctx context.Context, copySource string, blobClientStartCopyFromURLOptions *blobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (blobClientStartCopyFromURLResponse, error) { + req, err := client.startCopyFromURLCreateRequest(ctx, copySource, blobClientStartCopyFromURLOptions, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) + if err != nil { + return blobClientStartCopyFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientStartCopyFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return blobClientStartCopyFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.startCopyFromURLHandleResponse(resp) +} + +// startCopyFromURLCreateRequest creates the StartCopyFromURL request. 
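// Editor's note: each operation accepts a specific set of status codes
// (StartCopyFromURL expects 202 Accepted, SetTier allows 200 or 202) and turns
// anything else into an error via runtime.NewResponseError. Below is a rough,
// stdlib-only equivalent of that status check, for illustration only — not the
// azcore runtime implementation.
package main

import (
	"fmt"
	"net/http"
)

func hasStatusCode(resp *http.Response, codes ...int) bool {
	for _, code := range codes {
		if resp.StatusCode == code {
			return true
		}
	}
	return false
}

func main() {
	resp := &http.Response{StatusCode: http.StatusAccepted}
	fmt.Println(hasStatusCode(resp, http.StatusOK, http.StatusAccepted)) // true
	fmt.Println(hasStatusCode(resp, http.StatusCreated))                 // false
}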
+func (client *blobClient) startCopyFromURLCreateRequest(ctx context.Context, copySource string, blobClientStartCopyFromURLOptions *blobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blobClientStartCopyFromURLOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.Metadata != nil { + for k, v := range blobClientStartCopyFromURLOptions.Metadata { + req.Raw().Header.Set("x-ms-meta-"+k, v) + } + } + if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.Tier != nil { + req.Raw().Header.Set("x-ms-access-tier", string(*blobClientStartCopyFromURLOptions.Tier)) + } + if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.RehydratePriority != nil { + req.Raw().Header.Set("x-ms-rehydrate-priority", string(*blobClientStartCopyFromURLOptions.RehydratePriority)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { + req.Raw().Header.Set("x-ms-source-if-tags", *sourceModifiedAccessConditions.SourceIfTags) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-copy-source", copySource) + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", 
*leaseAccessConditions.LeaseID) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blobClientStartCopyFromURLOptions.RequestID) + } + if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.BlobTagsString != nil { + req.Raw().Header.Set("x-ms-tags", *blobClientStartCopyFromURLOptions.BlobTagsString) + } + if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.SealBlob != nil { + req.Raw().Header.Set("x-ms-seal-blob", strconv.FormatBool(*blobClientStartCopyFromURLOptions.SealBlob)) + } + if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.ImmutabilityPolicyExpiry != nil { + req.Raw().Header.Set("x-ms-immutability-policy-until-date", blobClientStartCopyFromURLOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) + } + if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.ImmutabilityPolicyMode != nil { + req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blobClientStartCopyFromURLOptions.ImmutabilityPolicyMode)) + } + if blobClientStartCopyFromURLOptions != nil && blobClientStartCopyFromURLOptions.LegalHold != nil { + req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*blobClientStartCopyFromURLOptions.LegalHold)) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// startCopyFromURLHandleResponse handles the StartCopyFromURL response. +func (client *blobClient) startCopyFromURLHandleResponse(resp *http.Response) (blobClientStartCopyFromURLResponse, error) { + result := blobClientStartCopyFromURLResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientStartCopyFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientStartCopyFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + return result, nil +} + +// Undelete - Undelete a blob that was previously soft deleted +// If the operation fails it returns an *azcore.ResponseError type. +// options - blobClientUndeleteOptions contains the optional parameters for the blobClient.Undelete method. 
+func (client *blobClient) Undelete(ctx context.Context, options *blobClientUndeleteOptions) (blobClientUndeleteResponse, error) { + req, err := client.undeleteCreateRequest(ctx, options) + if err != nil { + return blobClientUndeleteResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blobClientUndeleteResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blobClientUndeleteResponse{}, runtime.NewResponseError(resp) + } + return client.undeleteHandleResponse(resp) +} + +// undeleteCreateRequest creates the Undelete request. +func (client *blobClient) undeleteCreateRequest(ctx context.Context, options *blobClientUndeleteOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "undelete") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// undeleteHandleResponse handles the Undelete response. +func (client *blobClient) undeleteHandleResponse(resp *http.Response) (blobClientUndeleteResponse, error) { + result := blobClientUndeleteResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blobClientUndeleteResponse{}, err + } + result.Date = &date + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blockblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blockblob_client.go new file mode 100644 index 000000000..3f78a28aa --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_blockblob_client.go @@ -0,0 +1,953 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "time" +) + +type blockBlobClient struct { + endpoint string + pl runtime.Pipeline +} + +// newBlockBlobClient creates a new instance of blockBlobClient with the specified values. +// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// pl - the pipeline used for sending requests and handling responses. 
+func newBlockBlobClient(endpoint string, pl runtime.Pipeline) *blockBlobClient { + client := &blockBlobClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// CommitBlockList - The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob. +// In order to be written as part of a blob, a block must have been successfully written to the +// server in a prior Put Block operation. You can call Put Block List to update a blob by uploading only those blocks that +// have changed, then committing the new and existing blocks together. You can do +// this by specifying whether to commit a block from the committed block list or from the uncommitted block list, or to commit +// the most recently uploaded version of the block, whichever list it may +// belong to. +// If the operation fails it returns an *azcore.ResponseError type. +// blocks - Blob Blocks. +// blockBlobClientCommitBlockListOptions - blockBlobClientCommitBlockListOptions contains the optional parameters for the +// blockBlobClient.CommitBlockList method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *blockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, blockBlobClientCommitBlockListOptions *blockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (blockBlobClientCommitBlockListResponse, error) { + req, err := client.commitBlockListCreateRequest(ctx, blocks, blockBlobClientCommitBlockListOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return blockBlobClientCommitBlockListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blockBlobClientCommitBlockListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return blockBlobClientCommitBlockListResponse{}, runtime.NewResponseError(resp) + } + return client.commitBlockListHandleResponse(resp) +} + +// commitBlockListCreateRequest creates the CommitBlockList request. 
+func (client *blockBlobClient) commitBlockListCreateRequest(ctx context.Context, blocks BlockLookupList, blockBlobClientCommitBlockListOptions *blockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "blocklist") + if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientCommitBlockListOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)) + } + if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.TransactionalContentMD5 != nil { + req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blockBlobClientCommitBlockListOptions.TransactionalContentMD5)) + } + if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.TransactionalContentCRC64 != nil { + req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(blockBlobClientCommitBlockListOptions.TransactionalContentCRC64)) + } + if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.Metadata != nil { + for k, v := range blockBlobClientCommitBlockListOptions.Metadata { + req.Raw().Header.Set("x-ms-meta-"+k, v) + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.Tier != nil { + req.Raw().Header.Set("x-ms-access-tier", string(*blockBlobClientCommitBlockListOptions.Tier)) + 
} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientCommitBlockListOptions.RequestID) + } + if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.BlobTagsString != nil { + req.Raw().Header.Set("x-ms-tags", *blockBlobClientCommitBlockListOptions.BlobTagsString) + } + if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.ImmutabilityPolicyExpiry != nil { + req.Raw().Header.Set("x-ms-immutability-policy-until-date", blockBlobClientCommitBlockListOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) + } + if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.ImmutabilityPolicyMode != nil { + req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blockBlobClientCommitBlockListOptions.ImmutabilityPolicyMode)) + } + if blockBlobClientCommitBlockListOptions != nil && blockBlobClientCommitBlockListOptions.LegalHold != nil { + req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*blockBlobClientCommitBlockListOptions.LegalHold)) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, runtime.MarshalAsXML(req, blocks) +} + +// commitBlockListHandleResponse handles the CommitBlockList response. 
+func (client *blockBlobClient) commitBlockListHandleResponse(resp *http.Response) (blockBlobClientCommitBlockListResponse, error) { + result := blockBlobClientCommitBlockListResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blockBlobClientCommitBlockListResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blockBlobClientCommitBlockListResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blockBlobClientCommitBlockListResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blockBlobClientCommitBlockListResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return blockBlobClientCommitBlockListResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// GetBlockList - The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob +// If the operation fails it returns an *azcore.ResponseError type. +// listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together. +// blockBlobClientGetBlockListOptions - blockBlobClientGetBlockListOptions contains the optional parameters for the blockBlobClient.GetBlockList +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
+func (client *blockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, blockBlobClientGetBlockListOptions *blockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (blockBlobClientGetBlockListResponse, error) { + req, err := client.getBlockListCreateRequest(ctx, listType, blockBlobClientGetBlockListOptions, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return blockBlobClientGetBlockListResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blockBlobClientGetBlockListResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return blockBlobClientGetBlockListResponse{}, runtime.NewResponseError(resp) + } + return client.getBlockListHandleResponse(resp) +} + +// getBlockListCreateRequest creates the GetBlockList request. +func (client *blockBlobClient) getBlockListCreateRequest(ctx context.Context, listType BlockListType, blockBlobClientGetBlockListOptions *blockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "blocklist") + if blockBlobClientGetBlockListOptions != nil && blockBlobClientGetBlockListOptions.Snapshot != nil { + reqQP.Set("snapshot", *blockBlobClientGetBlockListOptions.Snapshot) + } + reqQP.Set("blocklisttype", string(listType)) + if blockBlobClientGetBlockListOptions != nil && blockBlobClientGetBlockListOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientGetBlockListOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blockBlobClientGetBlockListOptions != nil && blockBlobClientGetBlockListOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientGetBlockListOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getBlockListHandleResponse handles the GetBlockList response. 
+func (client *blockBlobClient) getBlockListHandleResponse(resp *http.Response) (blockBlobClientGetBlockListResponse, error) { + result := blockBlobClientGetBlockListResponse{RawResponse: resp} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blockBlobClientGetBlockListResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { + blobContentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return blockBlobClientGetBlockListResponse{}, err + } + result.BlobContentLength = &blobContentLength + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blockBlobClientGetBlockListResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.BlockList); err != nil { + return blockBlobClientGetBlockListResponse{}, err + } + return result, nil +} + +// PutBlobFromURL - The Put Blob from URL operation creates a new Block Blob where the contents of the blob are read from +// a given URL. This API is supported beginning with the 2020-04-08 version. Partial updates are not +// supported with Put Blob from URL; the content of an existing blob is overwritten with the content of the new blob. To perform +// partial updates to a block blob’s contents using a source URL, use the Put +// Block from URL API in conjunction with Put Block List. +// If the operation fails it returns an *azcore.ResponseError type. +// contentLength - The length of the request. +// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// blockBlobClientPutBlobFromURLOptions - blockBlobClientPutBlobFromURLOptions contains the optional parameters for the blockBlobClient.PutBlobFromURL +// method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL +// method. 
+func (client *blockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, blockBlobClientPutBlobFromURLOptions *blockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (blockBlobClientPutBlobFromURLResponse, error) { + req, err := client.putBlobFromURLCreateRequest(ctx, contentLength, copySource, blockBlobClientPutBlobFromURLOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return blockBlobClientPutBlobFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blockBlobClientPutBlobFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return blockBlobClientPutBlobFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.putBlobFromURLHandleResponse(resp) +} + +// putBlobFromURLCreateRequest creates the PutBlobFromURL request. +func (client *blockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, contentLength int64, copySource string, blockBlobClientPutBlobFromURLOptions *blockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientPutBlobFromURLOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-blob-type", "BlockBlob") + if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.TransactionalContentMD5 != nil { + req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blockBlobClientPutBlobFromURLOptions.TransactionalContentMD5)) + } + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl) + } + if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.Metadata != nil { + for k, v := range blockBlobClientPutBlobFromURLOptions.Metadata { + req.Raw().Header.Set("x-ms-meta-"+k, v) + } + } + if leaseAccessConditions != nil && 
leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.Tier != nil { + req.Raw().Header.Set("x-ms-access-tier", string(*blockBlobClientPutBlobFromURLOptions.Tier)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { + req.Raw().Header.Set("x-ms-source-if-tags", *sourceModifiedAccessConditions.SourceIfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientPutBlobFromURLOptions.RequestID) + } + if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.SourceContentMD5 != nil { + req.Raw().Header.Set("x-ms-source-content-md5", 
base64.StdEncoding.EncodeToString(blockBlobClientPutBlobFromURLOptions.SourceContentMD5)) + } + if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.BlobTagsString != nil { + req.Raw().Header.Set("x-ms-tags", *blockBlobClientPutBlobFromURLOptions.BlobTagsString) + } + req.Raw().Header.Set("x-ms-copy-source", copySource) + if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.CopySourceBlobProperties != nil { + req.Raw().Header.Set("x-ms-copy-source-blob-properties", strconv.FormatBool(*blockBlobClientPutBlobFromURLOptions.CopySourceBlobProperties)) + } + if blockBlobClientPutBlobFromURLOptions != nil && blockBlobClientPutBlobFromURLOptions.CopySourceAuthorization != nil { + req.Raw().Header.Set("x-ms-copy-source-authorization", *blockBlobClientPutBlobFromURLOptions.CopySourceAuthorization) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// putBlobFromURLHandleResponse handles the PutBlobFromURL response. +func (client *blockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) (blockBlobClientPutBlobFromURLResponse, error) { + result := blockBlobClientPutBlobFromURLResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blockBlobClientPutBlobFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blockBlobClientPutBlobFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blockBlobClientPutBlobFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return blockBlobClientPutBlobFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// StageBlock - The Stage Block operation creates a new block to be committed as part of a blob +// If the operation fails it returns an *azcore.ResponseError type. +// blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal +// to 64 bytes in size. For a given blob, the length of the value specified for the blockid +// parameter must be the same size for each block. +// contentLength - The length of the request. +// body - Initial data +// blockBlobClientStageBlockOptions - blockBlobClientStageBlockOptions contains the optional parameters for the blockBlobClient.StageBlock +// method. 
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +func (client *blockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, blockBlobClientStageBlockOptions *blockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (blockBlobClientStageBlockResponse, error) { + req, err := client.stageBlockCreateRequest(ctx, blockID, contentLength, body, blockBlobClientStageBlockOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo) + if err != nil { + return blockBlobClientStageBlockResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blockBlobClientStageBlockResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return blockBlobClientStageBlockResponse{}, runtime.NewResponseError(resp) + } + return client.stageBlockHandleResponse(resp) +} + +// stageBlockCreateRequest creates the StageBlock request. +func (client *blockBlobClient) stageBlockCreateRequest(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, blockBlobClientStageBlockOptions *blockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "block") + reqQP.Set("blockid", blockID) + if blockBlobClientStageBlockOptions != nil && blockBlobClientStageBlockOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientStageBlockOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if blockBlobClientStageBlockOptions != nil && blockBlobClientStageBlockOptions.TransactionalContentMD5 != nil { + req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blockBlobClientStageBlockOptions.TransactionalContentMD5)) + } + if blockBlobClientStageBlockOptions != nil && blockBlobClientStageBlockOptions.TransactionalContentCRC64 != nil { + req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(blockBlobClientStageBlockOptions.TransactionalContentCRC64)) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blockBlobClientStageBlockOptions != nil && blockBlobClientStageBlockOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", 
*blockBlobClientStageBlockOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, req.SetBody(body, "application/octet-stream") +} + +// stageBlockHandleResponse handles the StageBlock response. +func (client *blockBlobClient) stageBlockHandleResponse(resp *http.Response) (blockBlobClientStageBlockResponse, error) { + result := blockBlobClientStageBlockResponse{RawResponse: resp} + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blockBlobClientStageBlockResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blockBlobClientStageBlockResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blockBlobClientStageBlockResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return blockBlobClientStageBlockResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// StageBlockFromURL - The Stage Block operation creates a new block to be committed as part of a blob where the contents +// are read from a URL. +// If the operation fails it returns an *azcore.ResponseError type. +// blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal +// to 64 bytes in size. For a given blob, the length of the value specified for the blockid +// parameter must be the same size for each block. +// contentLength - The length of the request. +// sourceURL - Specify a URL to the copy source. +// blockBlobClientStageBlockFromURLOptions - blockBlobClientStageBlockFromURLOptions contains the optional parameters for +// the blockBlobClient.StageBlockFromURL method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL +// method. 
+func (client *blockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, blockBlobClientStageBlockFromURLOptions *blockBlobClientStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (blockBlobClientStageBlockFromURLResponse, error) { + req, err := client.stageBlockFromURLCreateRequest(ctx, blockID, contentLength, sourceURL, blockBlobClientStageBlockFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return blockBlobClientStageBlockFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blockBlobClientStageBlockFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return blockBlobClientStageBlockFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.stageBlockFromURLHandleResponse(resp) +} + +// stageBlockFromURLCreateRequest creates the StageBlockFromURL request. +func (client *blockBlobClient) stageBlockFromURLCreateRequest(ctx context.Context, blockID string, contentLength int64, sourceURL string, blockBlobClientStageBlockFromURLOptions *blockBlobClientStageBlockFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "block") + reqQP.Set("blockid", blockID) + if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientStageBlockFromURLOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + req.Raw().Header.Set("x-ms-copy-source", sourceURL) + if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.SourceRange != nil { + req.Raw().Header.Set("x-ms-source-range", *blockBlobClientStageBlockFromURLOptions.SourceRange) + } + if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.SourceContentMD5 != nil { + req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(blockBlobClientStageBlockFromURLOptions.SourceContentMD5)) + } + if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.SourceContentcrc64 != nil { + req.Raw().Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(blockBlobClientStageBlockFromURLOptions.SourceContentcrc64)) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", 
*leaseAccessConditions.LeaseID) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientStageBlockFromURLOptions.RequestID) + } + if blockBlobClientStageBlockFromURLOptions != nil && blockBlobClientStageBlockFromURLOptions.CopySourceAuthorization != nil { + req.Raw().Header.Set("x-ms-copy-source-authorization", *blockBlobClientStageBlockFromURLOptions.CopySourceAuthorization) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// stageBlockFromURLHandleResponse handles the StageBlockFromURL response. +func (client *blockBlobClient) stageBlockFromURLHandleResponse(resp *http.Response) (blockBlobClientStageBlockFromURLResponse, error) { + result := blockBlobClientStageBlockFromURLResponse{RawResponse: resp} + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blockBlobClientStageBlockFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blockBlobClientStageBlockFromURLResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blockBlobClientStageBlockFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return blockBlobClientStageBlockFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// Upload - The Upload Block Blob operation updates the content of an existing block blob. Updating an existing block blob +// overwrites any existing metadata on the blob. 
Partial updates are not supported with Put +// Blob; the content of the existing blob is overwritten with the content of the new blob. To perform a partial update of +// the content of a block blob, use the Put Block List operation. +// If the operation fails it returns an *azcore.ResponseError type. +// contentLength - The length of the request. +// body - Initial data +// blockBlobClientUploadOptions - blockBlobClientUploadOptions contains the optional parameters for the blockBlobClient.Upload +// method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *blockBlobClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeekCloser, blockBlobClientUploadOptions *blockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (blockBlobClientUploadResponse, error) { + req, err := client.uploadCreateRequest(ctx, contentLength, body, blockBlobClientUploadOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return blockBlobClientUploadResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return blockBlobClientUploadResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return blockBlobClientUploadResponse{}, runtime.NewResponseError(resp) + } + return client.uploadHandleResponse(resp) +} + +// uploadCreateRequest creates the Upload request. 
+func (client *blockBlobClient) uploadCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, blockBlobClientUploadOptions *blockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*blockBlobClientUploadOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-blob-type", "BlockBlob") + if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.TransactionalContentMD5 != nil { + req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(blockBlobClientUploadOptions.TransactionalContentMD5)) + } + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl) + } + if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.Metadata != nil { + for k, v := range blockBlobClientUploadOptions.Metadata { + req.Raw().Header.Set("x-ms-meta-"+k, v) + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.Tier != nil { + req.Raw().Header.Set("x-ms-access-tier", string(*blockBlobClientUploadOptions.Tier)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != 
nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *blockBlobClientUploadOptions.RequestID) + } + if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.BlobTagsString != nil { + req.Raw().Header.Set("x-ms-tags", *blockBlobClientUploadOptions.BlobTagsString) + } + if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.ImmutabilityPolicyExpiry != nil { + req.Raw().Header.Set("x-ms-immutability-policy-until-date", blockBlobClientUploadOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) + } + if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.ImmutabilityPolicyMode != nil { + req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*blockBlobClientUploadOptions.ImmutabilityPolicyMode)) + } + if blockBlobClientUploadOptions != nil && blockBlobClientUploadOptions.LegalHold != nil { + req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*blockBlobClientUploadOptions.LegalHold)) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, req.SetBody(body, "application/octet-stream") +} + +// uploadHandleResponse handles the Upload response. 
+func (client *blockBlobClient) uploadHandleResponse(resp *http.Response) (blockBlobClientUploadResponse, error) { + result := blockBlobClientUploadResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return blockBlobClientUploadResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return blockBlobClientUploadResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return blockBlobClientUploadResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return blockBlobClientUploadResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_constants.go new file mode 100644 index 000000000..2348df04a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_constants.go @@ -0,0 +1,841 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +const ( + moduleName = "azblob" + moduleVersion = "v0.4.1" +) + +// AccessTier enum +type AccessTier string + +const ( + AccessTierArchive AccessTier = "Archive" + AccessTierCool AccessTier = "Cool" + AccessTierHot AccessTier = "Hot" + AccessTierP10 AccessTier = "P10" + AccessTierP15 AccessTier = "P15" + AccessTierP20 AccessTier = "P20" + AccessTierP30 AccessTier = "P30" + AccessTierP4 AccessTier = "P4" + AccessTierP40 AccessTier = "P40" + AccessTierP50 AccessTier = "P50" + AccessTierP6 AccessTier = "P6" + AccessTierP60 AccessTier = "P60" + AccessTierP70 AccessTier = "P70" + AccessTierP80 AccessTier = "P80" +) + +// PossibleAccessTierValues returns the possible values for the AccessTier const type. +func PossibleAccessTierValues() []AccessTier { + return []AccessTier{ + AccessTierArchive, + AccessTierCool, + AccessTierHot, + AccessTierP10, + AccessTierP15, + AccessTierP20, + AccessTierP30, + AccessTierP4, + AccessTierP40, + AccessTierP50, + AccessTierP6, + AccessTierP60, + AccessTierP70, + AccessTierP80, + } +} + +// ToPtr returns a *AccessTier pointing to the current value. 
+func (c AccessTier) ToPtr() *AccessTier { + return &c +} + +// AccountKind enum +type AccountKind string + +const ( + AccountKindStorage AccountKind = "Storage" + AccountKindBlobStorage AccountKind = "BlobStorage" + AccountKindStorageV2 AccountKind = "StorageV2" + AccountKindFileStorage AccountKind = "FileStorage" + AccountKindBlockBlobStorage AccountKind = "BlockBlobStorage" +) + +// PossibleAccountKindValues returns the possible values for the AccountKind const type. +func PossibleAccountKindValues() []AccountKind { + return []AccountKind{ + AccountKindStorage, + AccountKindBlobStorage, + AccountKindStorageV2, + AccountKindFileStorage, + AccountKindBlockBlobStorage, + } +} + +// ToPtr returns a *AccountKind pointing to the current value. +func (c AccountKind) ToPtr() *AccountKind { + return &c +} + +// ArchiveStatus enum +type ArchiveStatus string + +const ( + ArchiveStatusRehydratePendingToCool ArchiveStatus = "rehydrate-pending-to-cool" + ArchiveStatusRehydratePendingToHot ArchiveStatus = "rehydrate-pending-to-hot" +) + +// PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. +func PossibleArchiveStatusValues() []ArchiveStatus { + return []ArchiveStatus{ + ArchiveStatusRehydratePendingToCool, + ArchiveStatusRehydratePendingToHot, + } +} + +// ToPtr returns a *ArchiveStatus pointing to the current value. +func (c ArchiveStatus) ToPtr() *ArchiveStatus { + return &c +} + +// BlobExpiryOptions enum +type BlobExpiryOptions string + +const ( + BlobExpiryOptionsAbsolute BlobExpiryOptions = "Absolute" + BlobExpiryOptionsNeverExpire BlobExpiryOptions = "NeverExpire" + BlobExpiryOptionsRelativeToCreation BlobExpiryOptions = "RelativeToCreation" + BlobExpiryOptionsRelativeToNow BlobExpiryOptions = "RelativeToNow" +) + +// PossibleBlobExpiryOptionsValues returns the possible values for the BlobExpiryOptions const type. +func PossibleBlobExpiryOptionsValues() []BlobExpiryOptions { + return []BlobExpiryOptions{ + BlobExpiryOptionsAbsolute, + BlobExpiryOptionsNeverExpire, + BlobExpiryOptionsRelativeToCreation, + BlobExpiryOptionsRelativeToNow, + } +} + +// ToPtr returns a *BlobExpiryOptions pointing to the current value. +func (c BlobExpiryOptions) ToPtr() *BlobExpiryOptions { + return &c +} + +// BlobGeoReplicationStatus - The status of the secondary location +type BlobGeoReplicationStatus string + +const ( + BlobGeoReplicationStatusLive BlobGeoReplicationStatus = "live" + BlobGeoReplicationStatusBootstrap BlobGeoReplicationStatus = "bootstrap" + BlobGeoReplicationStatusUnavailable BlobGeoReplicationStatus = "unavailable" +) + +// PossibleBlobGeoReplicationStatusValues returns the possible values for the BlobGeoReplicationStatus const type. +func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus { + return []BlobGeoReplicationStatus{ + BlobGeoReplicationStatusLive, + BlobGeoReplicationStatusBootstrap, + BlobGeoReplicationStatusUnavailable, + } +} + +// ToPtr returns a *BlobGeoReplicationStatus pointing to the current value. +func (c BlobGeoReplicationStatus) ToPtr() *BlobGeoReplicationStatus { + return &c +} + +// BlobImmutabilityPolicyMode enum +type BlobImmutabilityPolicyMode string + +const ( + BlobImmutabilityPolicyModeMutable BlobImmutabilityPolicyMode = "Mutable" + BlobImmutabilityPolicyModeUnlocked BlobImmutabilityPolicyMode = "Unlocked" + BlobImmutabilityPolicyModeLocked BlobImmutabilityPolicyMode = "Locked" +) + +// PossibleBlobImmutabilityPolicyModeValues returns the possible values for the BlobImmutabilityPolicyMode const type. 
+func PossibleBlobImmutabilityPolicyModeValues() []BlobImmutabilityPolicyMode { + return []BlobImmutabilityPolicyMode{ + BlobImmutabilityPolicyModeMutable, + BlobImmutabilityPolicyModeUnlocked, + BlobImmutabilityPolicyModeLocked, + } +} + +// ToPtr returns a *BlobImmutabilityPolicyMode pointing to the current value. +func (c BlobImmutabilityPolicyMode) ToPtr() *BlobImmutabilityPolicyMode { + return &c +} + +// BlobType enum +type BlobType string + +const ( + BlobTypeBlockBlob BlobType = "BlockBlob" + BlobTypePageBlob BlobType = "PageBlob" + BlobTypeAppendBlob BlobType = "AppendBlob" +) + +// PossibleBlobTypeValues returns the possible values for the BlobType const type. +func PossibleBlobTypeValues() []BlobType { + return []BlobType{ + BlobTypeBlockBlob, + BlobTypePageBlob, + BlobTypeAppendBlob, + } +} + +// ToPtr returns a *BlobType pointing to the current value. +func (c BlobType) ToPtr() *BlobType { + return &c +} + +// BlockListType enum +type BlockListType string + +const ( + BlockListTypeCommitted BlockListType = "committed" + BlockListTypeUncommitted BlockListType = "uncommitted" + BlockListTypeAll BlockListType = "all" +) + +// PossibleBlockListTypeValues returns the possible values for the BlockListType const type. +func PossibleBlockListTypeValues() []BlockListType { + return []BlockListType{ + BlockListTypeCommitted, + BlockListTypeUncommitted, + BlockListTypeAll, + } +} + +// ToPtr returns a *BlockListType pointing to the current value. +func (c BlockListType) ToPtr() *BlockListType { + return &c +} + +// CopyStatusType enum +type CopyStatusType string + +const ( + CopyStatusTypePending CopyStatusType = "pending" + CopyStatusTypeSuccess CopyStatusType = "success" + CopyStatusTypeAborted CopyStatusType = "aborted" + CopyStatusTypeFailed CopyStatusType = "failed" +) + +// PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. +func PossibleCopyStatusTypeValues() []CopyStatusType { + return []CopyStatusType{ + CopyStatusTypePending, + CopyStatusTypeSuccess, + CopyStatusTypeAborted, + CopyStatusTypeFailed, + } +} + +// ToPtr returns a *CopyStatusType pointing to the current value. +func (c CopyStatusType) ToPtr() *CopyStatusType { + return &c +} + +// DeleteSnapshotsOptionType enum +type DeleteSnapshotsOptionType string + +const ( + DeleteSnapshotsOptionTypeInclude DeleteSnapshotsOptionType = "include" + DeleteSnapshotsOptionTypeOnly DeleteSnapshotsOptionType = "only" +) + +// PossibleDeleteSnapshotsOptionTypeValues returns the possible values for the DeleteSnapshotsOptionType const type. +func PossibleDeleteSnapshotsOptionTypeValues() []DeleteSnapshotsOptionType { + return []DeleteSnapshotsOptionType{ + DeleteSnapshotsOptionTypeInclude, + DeleteSnapshotsOptionTypeOnly, + } +} + +// ToPtr returns a *DeleteSnapshotsOptionType pointing to the current value. +func (c DeleteSnapshotsOptionType) ToPtr() *DeleteSnapshotsOptionType { + return &c +} + +// EncryptionAlgorithmType enum +type EncryptionAlgorithmType string + +const ( + EncryptionAlgorithmTypeNone EncryptionAlgorithmType = "None" + EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = "AES256" +) + +// PossibleEncryptionAlgorithmTypeValues returns the possible values for the EncryptionAlgorithmType const type. +func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { + return []EncryptionAlgorithmType{ + EncryptionAlgorithmTypeNone, + EncryptionAlgorithmTypeAES256, + } +} + +// ToPtr returns a *EncryptionAlgorithmType pointing to the current value. 
+func (c EncryptionAlgorithmType) ToPtr() *EncryptionAlgorithmType { + return &c +} + +// LeaseDurationType enum +type LeaseDurationType string + +const ( + LeaseDurationTypeInfinite LeaseDurationType = "infinite" + LeaseDurationTypeFixed LeaseDurationType = "fixed" +) + +// PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type. +func PossibleLeaseDurationTypeValues() []LeaseDurationType { + return []LeaseDurationType{ + LeaseDurationTypeInfinite, + LeaseDurationTypeFixed, + } +} + +// ToPtr returns a *LeaseDurationType pointing to the current value. +func (c LeaseDurationType) ToPtr() *LeaseDurationType { + return &c +} + +// LeaseStateType enum +type LeaseStateType string + +const ( + LeaseStateTypeAvailable LeaseStateType = "available" + LeaseStateTypeLeased LeaseStateType = "leased" + LeaseStateTypeExpired LeaseStateType = "expired" + LeaseStateTypeBreaking LeaseStateType = "breaking" + LeaseStateTypeBroken LeaseStateType = "broken" +) + +// PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type. +func PossibleLeaseStateTypeValues() []LeaseStateType { + return []LeaseStateType{ + LeaseStateTypeAvailable, + LeaseStateTypeLeased, + LeaseStateTypeExpired, + LeaseStateTypeBreaking, + LeaseStateTypeBroken, + } +} + +// ToPtr returns a *LeaseStateType pointing to the current value. +func (c LeaseStateType) ToPtr() *LeaseStateType { + return &c +} + +// LeaseStatusType enum +type LeaseStatusType string + +const ( + LeaseStatusTypeLocked LeaseStatusType = "locked" + LeaseStatusTypeUnlocked LeaseStatusType = "unlocked" +) + +// PossibleLeaseStatusTypeValues returns the possible values for the LeaseStatusType const type. +func PossibleLeaseStatusTypeValues() []LeaseStatusType { + return []LeaseStatusType{ + LeaseStatusTypeLocked, + LeaseStatusTypeUnlocked, + } +} + +// ToPtr returns a *LeaseStatusType pointing to the current value. +func (c LeaseStatusType) ToPtr() *LeaseStatusType { + return &c +} + +// ListBlobsIncludeItem enum +type ListBlobsIncludeItem string + +const ( + ListBlobsIncludeItemCopy ListBlobsIncludeItem = "copy" + ListBlobsIncludeItemDeleted ListBlobsIncludeItem = "deleted" + ListBlobsIncludeItemMetadata ListBlobsIncludeItem = "metadata" + ListBlobsIncludeItemSnapshots ListBlobsIncludeItem = "snapshots" + ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItem = "uncommittedblobs" + ListBlobsIncludeItemVersions ListBlobsIncludeItem = "versions" + ListBlobsIncludeItemTags ListBlobsIncludeItem = "tags" + ListBlobsIncludeItemImmutabilitypolicy ListBlobsIncludeItem = "immutabilitypolicy" + ListBlobsIncludeItemLegalhold ListBlobsIncludeItem = "legalhold" + ListBlobsIncludeItemDeletedwithversions ListBlobsIncludeItem = "deletedwithversions" +) + +// PossibleListBlobsIncludeItemValues returns the possible values for the ListBlobsIncludeItem const type. +func PossibleListBlobsIncludeItemValues() []ListBlobsIncludeItem { + return []ListBlobsIncludeItem{ + ListBlobsIncludeItemCopy, + ListBlobsIncludeItemDeleted, + ListBlobsIncludeItemMetadata, + ListBlobsIncludeItemSnapshots, + ListBlobsIncludeItemUncommittedblobs, + ListBlobsIncludeItemVersions, + ListBlobsIncludeItemTags, + ListBlobsIncludeItemImmutabilitypolicy, + ListBlobsIncludeItemLegalhold, + ListBlobsIncludeItemDeletedwithversions, + } +} + +// ToPtr returns a *ListBlobsIncludeItem pointing to the current value. 
+func (c ListBlobsIncludeItem) ToPtr() *ListBlobsIncludeItem { + return &c +} + +// ListContainersIncludeType enum +type ListContainersIncludeType string + +const ( + ListContainersIncludeTypeMetadata ListContainersIncludeType = "metadata" + ListContainersIncludeTypeDeleted ListContainersIncludeType = "deleted" + ListContainersIncludeTypeSystem ListContainersIncludeType = "system" +) + +// PossibleListContainersIncludeTypeValues returns the possible values for the ListContainersIncludeType const type. +func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { + return []ListContainersIncludeType{ + ListContainersIncludeTypeMetadata, + ListContainersIncludeTypeDeleted, + ListContainersIncludeTypeSystem, + } +} + +// ToPtr returns a *ListContainersIncludeType pointing to the current value. +func (c ListContainersIncludeType) ToPtr() *ListContainersIncludeType { + return &c +} + +// PremiumPageBlobAccessTier enum +type PremiumPageBlobAccessTier string + +const ( + PremiumPageBlobAccessTierP10 PremiumPageBlobAccessTier = "P10" + PremiumPageBlobAccessTierP15 PremiumPageBlobAccessTier = "P15" + PremiumPageBlobAccessTierP20 PremiumPageBlobAccessTier = "P20" + PremiumPageBlobAccessTierP30 PremiumPageBlobAccessTier = "P30" + PremiumPageBlobAccessTierP4 PremiumPageBlobAccessTier = "P4" + PremiumPageBlobAccessTierP40 PremiumPageBlobAccessTier = "P40" + PremiumPageBlobAccessTierP50 PremiumPageBlobAccessTier = "P50" + PremiumPageBlobAccessTierP6 PremiumPageBlobAccessTier = "P6" + PremiumPageBlobAccessTierP60 PremiumPageBlobAccessTier = "P60" + PremiumPageBlobAccessTierP70 PremiumPageBlobAccessTier = "P70" + PremiumPageBlobAccessTierP80 PremiumPageBlobAccessTier = "P80" +) + +// PossiblePremiumPageBlobAccessTierValues returns the possible values for the PremiumPageBlobAccessTier const type. +func PossiblePremiumPageBlobAccessTierValues() []PremiumPageBlobAccessTier { + return []PremiumPageBlobAccessTier{ + PremiumPageBlobAccessTierP10, + PremiumPageBlobAccessTierP15, + PremiumPageBlobAccessTierP20, + PremiumPageBlobAccessTierP30, + PremiumPageBlobAccessTierP4, + PremiumPageBlobAccessTierP40, + PremiumPageBlobAccessTierP50, + PremiumPageBlobAccessTierP6, + PremiumPageBlobAccessTierP60, + PremiumPageBlobAccessTierP70, + PremiumPageBlobAccessTierP80, + } +} + +// ToPtr returns a *PremiumPageBlobAccessTier pointing to the current value. +func (c PremiumPageBlobAccessTier) ToPtr() *PremiumPageBlobAccessTier { + return &c +} + +// PublicAccessType enum +type PublicAccessType string + +const ( + PublicAccessTypeBlob PublicAccessType = "blob" + PublicAccessTypeContainer PublicAccessType = "container" +) + +// PossiblePublicAccessTypeValues returns the possible values for the PublicAccessType const type. +func PossiblePublicAccessTypeValues() []PublicAccessType { + return []PublicAccessType{ + PublicAccessTypeBlob, + PublicAccessTypeContainer, + } +} + +// ToPtr returns a *PublicAccessType pointing to the current value. +func (c PublicAccessType) ToPtr() *PublicAccessType { + return &c +} + +// QueryFormatType - The quick query format type. +type QueryFormatType string + +const ( + QueryFormatTypeDelimited QueryFormatType = "delimited" + QueryFormatTypeJSON QueryFormatType = "json" + QueryFormatTypeArrow QueryFormatType = "arrow" + QueryFormatTypeParquet QueryFormatType = "parquet" +) + +// PossibleQueryFormatTypeValues returns the possible values for the QueryFormatType const type. 
+func PossibleQueryFormatTypeValues() []QueryFormatType { + return []QueryFormatType{ + QueryFormatTypeDelimited, + QueryFormatTypeJSON, + QueryFormatTypeArrow, + QueryFormatTypeParquet, + } +} + +// ToPtr returns a *QueryFormatType pointing to the current value. +func (c QueryFormatType) ToPtr() *QueryFormatType { + return &c +} + +// RehydratePriority - If an object is in rehydrate pending state then this header is returned with priority of rehydrate. +// Valid values are High and Standard. +type RehydratePriority string + +const ( + RehydratePriorityHigh RehydratePriority = "High" + RehydratePriorityStandard RehydratePriority = "Standard" +) + +// PossibleRehydratePriorityValues returns the possible values for the RehydratePriority const type. +func PossibleRehydratePriorityValues() []RehydratePriority { + return []RehydratePriority{ + RehydratePriorityHigh, + RehydratePriorityStandard, + } +} + +// ToPtr returns a *RehydratePriority pointing to the current value. +func (c RehydratePriority) ToPtr() *RehydratePriority { + return &c +} + +// SKUName enum +type SKUName string + +const ( + SKUNameStandardLRS SKUName = "Standard_LRS" + SKUNameStandardGRS SKUName = "Standard_GRS" + SKUNameStandardRAGRS SKUName = "Standard_RAGRS" + SKUNameStandardZRS SKUName = "Standard_ZRS" + SKUNamePremiumLRS SKUName = "Premium_LRS" +) + +// PossibleSKUNameValues returns the possible values for the SKUName const type. +func PossibleSKUNameValues() []SKUName { + return []SKUName{ + SKUNameStandardLRS, + SKUNameStandardGRS, + SKUNameStandardRAGRS, + SKUNameStandardZRS, + SKUNamePremiumLRS, + } +} + +// ToPtr returns a *SKUName pointing to the current value. +func (c SKUName) ToPtr() *SKUName { + return &c +} + +// SequenceNumberActionType enum +type SequenceNumberActionType string + +const ( + SequenceNumberActionTypeMax SequenceNumberActionType = "max" + SequenceNumberActionTypeUpdate SequenceNumberActionType = "update" + SequenceNumberActionTypeIncrement SequenceNumberActionType = "increment" +) + +// PossibleSequenceNumberActionTypeValues returns the possible values for the SequenceNumberActionType const type. +func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType { + return []SequenceNumberActionType{ + SequenceNumberActionTypeMax, + SequenceNumberActionTypeUpdate, + SequenceNumberActionTypeIncrement, + } +} + +// ToPtr returns a *SequenceNumberActionType pointing to the current value. 
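A minimal usage sketch for the generated enum helpers above (not part of the vendored file). Go cannot take the address of a constant, so the ToPtr methods are the usual way to populate optional *T fields, and the PossibleValues slices lend themselves to input validation. The names come from this file; the wrapping helper and its name are hypothetical and assume package scope inside azblob.

// validateLeaseState is a hypothetical helper showing the two generated
// conveniences: PossibleLeaseStateTypeValues for validation, ToPtr for
// producing a pointer to an enum value without &-of-constant errors.
func validateLeaseState(raw string) (*LeaseStateType, bool) {
	for _, v := range PossibleLeaseStateTypeValues() {
		if string(v) == raw {
			return v.ToPtr(), true // pointer to a copy of the matched value
		}
	}
	return nil, false
}

// Filling an optional field in an options struct works the same way, e.g.:
//   access := PublicAccessTypeContainer.ToPtr() // *PublicAccessType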
+func (c SequenceNumberActionType) ToPtr() *SequenceNumberActionType { + return &c +} + +// StorageErrorCode - Error codes returned by the service +type StorageErrorCode string + +const ( + StorageErrorCodeAccountAlreadyExists StorageErrorCode = "AccountAlreadyExists" + StorageErrorCodeAccountBeingCreated StorageErrorCode = "AccountBeingCreated" + StorageErrorCodeAccountIsDisabled StorageErrorCode = "AccountIsDisabled" + StorageErrorCodeAppendPositionConditionNotMet StorageErrorCode = "AppendPositionConditionNotMet" + StorageErrorCodeAuthenticationFailed StorageErrorCode = "AuthenticationFailed" + StorageErrorCodeAuthorizationFailure StorageErrorCode = "AuthorizationFailure" + StorageErrorCodeAuthorizationPermissionMismatch StorageErrorCode = "AuthorizationPermissionMismatch" + StorageErrorCodeAuthorizationProtocolMismatch StorageErrorCode = "AuthorizationProtocolMismatch" + StorageErrorCodeAuthorizationResourceTypeMismatch StorageErrorCode = "AuthorizationResourceTypeMismatch" + StorageErrorCodeAuthorizationServiceMismatch StorageErrorCode = "AuthorizationServiceMismatch" + StorageErrorCodeAuthorizationSourceIPMismatch StorageErrorCode = "AuthorizationSourceIPMismatch" + StorageErrorCodeBlobAlreadyExists StorageErrorCode = "BlobAlreadyExists" + StorageErrorCodeBlobArchived StorageErrorCode = "BlobArchived" + StorageErrorCodeBlobBeingRehydrated StorageErrorCode = "BlobBeingRehydrated" + StorageErrorCodeBlobImmutableDueToPolicy StorageErrorCode = "BlobImmutableDueToPolicy" + StorageErrorCodeBlobNotArchived StorageErrorCode = "BlobNotArchived" + StorageErrorCodeBlobNotFound StorageErrorCode = "BlobNotFound" + StorageErrorCodeBlobOverwritten StorageErrorCode = "BlobOverwritten" + StorageErrorCodeBlobTierInadequateForContentLength StorageErrorCode = "BlobTierInadequateForContentLength" + StorageErrorCodeBlobUsesCustomerSpecifiedEncryption StorageErrorCode = "BlobUsesCustomerSpecifiedEncryption" + StorageErrorCodeBlockCountExceedsLimit StorageErrorCode = "BlockCountExceedsLimit" + StorageErrorCodeBlockListTooLong StorageErrorCode = "BlockListTooLong" + StorageErrorCodeCannotChangeToLowerTier StorageErrorCode = "CannotChangeToLowerTier" + StorageErrorCodeCannotVerifyCopySource StorageErrorCode = "CannotVerifyCopySource" + StorageErrorCodeConditionHeadersNotSupported StorageErrorCode = "ConditionHeadersNotSupported" + StorageErrorCodeConditionNotMet StorageErrorCode = "ConditionNotMet" + StorageErrorCodeContainerAlreadyExists StorageErrorCode = "ContainerAlreadyExists" + StorageErrorCodeContainerBeingDeleted StorageErrorCode = "ContainerBeingDeleted" + StorageErrorCodeContainerDisabled StorageErrorCode = "ContainerDisabled" + StorageErrorCodeContainerNotFound StorageErrorCode = "ContainerNotFound" + StorageErrorCodeContentLengthLargerThanTierLimit StorageErrorCode = "ContentLengthLargerThanTierLimit" + StorageErrorCodeCopyAcrossAccountsNotSupported StorageErrorCode = "CopyAcrossAccountsNotSupported" + StorageErrorCodeCopyIDMismatch StorageErrorCode = "CopyIdMismatch" + StorageErrorCodeEmptyMetadataKey StorageErrorCode = "EmptyMetadataKey" + StorageErrorCodeFeatureVersionMismatch StorageErrorCode = "FeatureVersionMismatch" + StorageErrorCodeIncrementalCopyBlobMismatch StorageErrorCode = "IncrementalCopyBlobMismatch" + StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed StorageErrorCode = "IncrementalCopyOfEralierVersionSnapshotNotAllowed" + StorageErrorCodeIncrementalCopySourceMustBeSnapshot StorageErrorCode = "IncrementalCopySourceMustBeSnapshot" + 
StorageErrorCodeInfiniteLeaseDurationRequired StorageErrorCode = "InfiniteLeaseDurationRequired" + StorageErrorCodeInsufficientAccountPermissions StorageErrorCode = "InsufficientAccountPermissions" + StorageErrorCodeInternalError StorageErrorCode = "InternalError" + StorageErrorCodeInvalidAuthenticationInfo StorageErrorCode = "InvalidAuthenticationInfo" + StorageErrorCodeInvalidBlobOrBlock StorageErrorCode = "InvalidBlobOrBlock" + StorageErrorCodeInvalidBlobTier StorageErrorCode = "InvalidBlobTier" + StorageErrorCodeInvalidBlobType StorageErrorCode = "InvalidBlobType" + StorageErrorCodeInvalidBlockID StorageErrorCode = "InvalidBlockId" + StorageErrorCodeInvalidBlockList StorageErrorCode = "InvalidBlockList" + StorageErrorCodeInvalidHTTPVerb StorageErrorCode = "InvalidHttpVerb" + StorageErrorCodeInvalidHeaderValue StorageErrorCode = "InvalidHeaderValue" + StorageErrorCodeInvalidInput StorageErrorCode = "InvalidInput" + StorageErrorCodeInvalidMD5 StorageErrorCode = "InvalidMd5" + StorageErrorCodeInvalidMetadata StorageErrorCode = "InvalidMetadata" + StorageErrorCodeInvalidOperation StorageErrorCode = "InvalidOperation" + StorageErrorCodeInvalidPageRange StorageErrorCode = "InvalidPageRange" + StorageErrorCodeInvalidQueryParameterValue StorageErrorCode = "InvalidQueryParameterValue" + StorageErrorCodeInvalidRange StorageErrorCode = "InvalidRange" + StorageErrorCodeInvalidResourceName StorageErrorCode = "InvalidResourceName" + StorageErrorCodeInvalidSourceBlobType StorageErrorCode = "InvalidSourceBlobType" + StorageErrorCodeInvalidSourceBlobURL StorageErrorCode = "InvalidSourceBlobUrl" + StorageErrorCodeInvalidURI StorageErrorCode = "InvalidUri" + StorageErrorCodeInvalidVersionForPageBlobOperation StorageErrorCode = "InvalidVersionForPageBlobOperation" + StorageErrorCodeInvalidXMLDocument StorageErrorCode = "InvalidXmlDocument" + StorageErrorCodeInvalidXMLNodeValue StorageErrorCode = "InvalidXmlNodeValue" + StorageErrorCodeLeaseAlreadyBroken StorageErrorCode = "LeaseAlreadyBroken" + StorageErrorCodeLeaseAlreadyPresent StorageErrorCode = "LeaseAlreadyPresent" + StorageErrorCodeLeaseIDMismatchWithBlobOperation StorageErrorCode = "LeaseIdMismatchWithBlobOperation" + StorageErrorCodeLeaseIDMismatchWithContainerOperation StorageErrorCode = "LeaseIdMismatchWithContainerOperation" + StorageErrorCodeLeaseIDMismatchWithLeaseOperation StorageErrorCode = "LeaseIdMismatchWithLeaseOperation" + StorageErrorCodeLeaseIDMissing StorageErrorCode = "LeaseIdMissing" + StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired StorageErrorCode = "LeaseIsBreakingAndCannotBeAcquired" + StorageErrorCodeLeaseIsBreakingAndCannotBeChanged StorageErrorCode = "LeaseIsBreakingAndCannotBeChanged" + StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed StorageErrorCode = "LeaseIsBrokenAndCannotBeRenewed" + StorageErrorCodeLeaseLost StorageErrorCode = "LeaseLost" + StorageErrorCodeLeaseNotPresentWithBlobOperation StorageErrorCode = "LeaseNotPresentWithBlobOperation" + StorageErrorCodeLeaseNotPresentWithContainerOperation StorageErrorCode = "LeaseNotPresentWithContainerOperation" + StorageErrorCodeLeaseNotPresentWithLeaseOperation StorageErrorCode = "LeaseNotPresentWithLeaseOperation" + StorageErrorCodeMD5Mismatch StorageErrorCode = "Md5Mismatch" + StorageErrorCodeMaxBlobSizeConditionNotMet StorageErrorCode = "MaxBlobSizeConditionNotMet" + StorageErrorCodeMetadataTooLarge StorageErrorCode = "MetadataTooLarge" + StorageErrorCodeMissingContentLengthHeader StorageErrorCode = "MissingContentLengthHeader" + StorageErrorCodeMissingRequiredHeader 
StorageErrorCode = "MissingRequiredHeader" + StorageErrorCodeMissingRequiredQueryParameter StorageErrorCode = "MissingRequiredQueryParameter" + StorageErrorCodeMissingRequiredXMLNode StorageErrorCode = "MissingRequiredXmlNode" + StorageErrorCodeMultipleConditionHeadersNotSupported StorageErrorCode = "MultipleConditionHeadersNotSupported" + StorageErrorCodeNoAuthenticationInformation StorageErrorCode = "NoAuthenticationInformation" + StorageErrorCodeNoPendingCopyOperation StorageErrorCode = "NoPendingCopyOperation" + StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob StorageErrorCode = "OperationNotAllowedOnIncrementalCopyBlob" + StorageErrorCodeOperationTimedOut StorageErrorCode = "OperationTimedOut" + StorageErrorCodeOutOfRangeInput StorageErrorCode = "OutOfRangeInput" + StorageErrorCodeOutOfRangeQueryParameterValue StorageErrorCode = "OutOfRangeQueryParameterValue" + StorageErrorCodePendingCopyOperation StorageErrorCode = "PendingCopyOperation" + StorageErrorCodePreviousSnapshotCannotBeNewer StorageErrorCode = "PreviousSnapshotCannotBeNewer" + StorageErrorCodePreviousSnapshotNotFound StorageErrorCode = "PreviousSnapshotNotFound" + StorageErrorCodePreviousSnapshotOperationNotSupported StorageErrorCode = "PreviousSnapshotOperationNotSupported" + StorageErrorCodeRequestBodyTooLarge StorageErrorCode = "RequestBodyTooLarge" + StorageErrorCodeRequestURLFailedToParse StorageErrorCode = "RequestUrlFailedToParse" + StorageErrorCodeResourceAlreadyExists StorageErrorCode = "ResourceAlreadyExists" + StorageErrorCodeResourceNotFound StorageErrorCode = "ResourceNotFound" + StorageErrorCodeResourceTypeMismatch StorageErrorCode = "ResourceTypeMismatch" + StorageErrorCodeSequenceNumberConditionNotMet StorageErrorCode = "SequenceNumberConditionNotMet" + StorageErrorCodeSequenceNumberIncrementTooLarge StorageErrorCode = "SequenceNumberIncrementTooLarge" + StorageErrorCodeServerBusy StorageErrorCode = "ServerBusy" + StorageErrorCodeSnapshotCountExceeded StorageErrorCode = "SnapshotCountExceeded" + StorageErrorCodeSnapshotOperationRateExceeded StorageErrorCode = "SnapshotOperationRateExceeded" + StorageErrorCodeSnapshotsPresent StorageErrorCode = "SnapshotsPresent" + StorageErrorCodeSourceConditionNotMet StorageErrorCode = "SourceConditionNotMet" + StorageErrorCodeSystemInUse StorageErrorCode = "SystemInUse" + StorageErrorCodeTargetConditionNotMet StorageErrorCode = "TargetConditionNotMet" + StorageErrorCodeUnauthorizedBlobOverwrite StorageErrorCode = "UnauthorizedBlobOverwrite" + StorageErrorCodeUnsupportedHTTPVerb StorageErrorCode = "UnsupportedHttpVerb" + StorageErrorCodeUnsupportedHeader StorageErrorCode = "UnsupportedHeader" + StorageErrorCodeUnsupportedQueryParameter StorageErrorCode = "UnsupportedQueryParameter" + StorageErrorCodeUnsupportedXMLNode StorageErrorCode = "UnsupportedXmlNode" +) + +// PossibleStorageErrorCodeValues returns the possible values for the StorageErrorCode const type. 
+func PossibleStorageErrorCodeValues() []StorageErrorCode { + return []StorageErrorCode{ + StorageErrorCodeAccountAlreadyExists, + StorageErrorCodeAccountBeingCreated, + StorageErrorCodeAccountIsDisabled, + StorageErrorCodeAppendPositionConditionNotMet, + StorageErrorCodeAuthenticationFailed, + StorageErrorCodeAuthorizationFailure, + StorageErrorCodeAuthorizationPermissionMismatch, + StorageErrorCodeAuthorizationProtocolMismatch, + StorageErrorCodeAuthorizationResourceTypeMismatch, + StorageErrorCodeAuthorizationServiceMismatch, + StorageErrorCodeAuthorizationSourceIPMismatch, + StorageErrorCodeBlobAlreadyExists, + StorageErrorCodeBlobArchived, + StorageErrorCodeBlobBeingRehydrated, + StorageErrorCodeBlobImmutableDueToPolicy, + StorageErrorCodeBlobNotArchived, + StorageErrorCodeBlobNotFound, + StorageErrorCodeBlobOverwritten, + StorageErrorCodeBlobTierInadequateForContentLength, + StorageErrorCodeBlobUsesCustomerSpecifiedEncryption, + StorageErrorCodeBlockCountExceedsLimit, + StorageErrorCodeBlockListTooLong, + StorageErrorCodeCannotChangeToLowerTier, + StorageErrorCodeCannotVerifyCopySource, + StorageErrorCodeConditionHeadersNotSupported, + StorageErrorCodeConditionNotMet, + StorageErrorCodeContainerAlreadyExists, + StorageErrorCodeContainerBeingDeleted, + StorageErrorCodeContainerDisabled, + StorageErrorCodeContainerNotFound, + StorageErrorCodeContentLengthLargerThanTierLimit, + StorageErrorCodeCopyAcrossAccountsNotSupported, + StorageErrorCodeCopyIDMismatch, + StorageErrorCodeEmptyMetadataKey, + StorageErrorCodeFeatureVersionMismatch, + StorageErrorCodeIncrementalCopyBlobMismatch, + StorageErrorCodeIncrementalCopyOfEralierVersionSnapshotNotAllowed, + StorageErrorCodeIncrementalCopySourceMustBeSnapshot, + StorageErrorCodeInfiniteLeaseDurationRequired, + StorageErrorCodeInsufficientAccountPermissions, + StorageErrorCodeInternalError, + StorageErrorCodeInvalidAuthenticationInfo, + StorageErrorCodeInvalidBlobOrBlock, + StorageErrorCodeInvalidBlobTier, + StorageErrorCodeInvalidBlobType, + StorageErrorCodeInvalidBlockID, + StorageErrorCodeInvalidBlockList, + StorageErrorCodeInvalidHTTPVerb, + StorageErrorCodeInvalidHeaderValue, + StorageErrorCodeInvalidInput, + StorageErrorCodeInvalidMD5, + StorageErrorCodeInvalidMetadata, + StorageErrorCodeInvalidOperation, + StorageErrorCodeInvalidPageRange, + StorageErrorCodeInvalidQueryParameterValue, + StorageErrorCodeInvalidRange, + StorageErrorCodeInvalidResourceName, + StorageErrorCodeInvalidSourceBlobType, + StorageErrorCodeInvalidSourceBlobURL, + StorageErrorCodeInvalidURI, + StorageErrorCodeInvalidVersionForPageBlobOperation, + StorageErrorCodeInvalidXMLDocument, + StorageErrorCodeInvalidXMLNodeValue, + StorageErrorCodeLeaseAlreadyBroken, + StorageErrorCodeLeaseAlreadyPresent, + StorageErrorCodeLeaseIDMismatchWithBlobOperation, + StorageErrorCodeLeaseIDMismatchWithContainerOperation, + StorageErrorCodeLeaseIDMismatchWithLeaseOperation, + StorageErrorCodeLeaseIDMissing, + StorageErrorCodeLeaseIsBreakingAndCannotBeAcquired, + StorageErrorCodeLeaseIsBreakingAndCannotBeChanged, + StorageErrorCodeLeaseIsBrokenAndCannotBeRenewed, + StorageErrorCodeLeaseLost, + StorageErrorCodeLeaseNotPresentWithBlobOperation, + StorageErrorCodeLeaseNotPresentWithContainerOperation, + StorageErrorCodeLeaseNotPresentWithLeaseOperation, + StorageErrorCodeMD5Mismatch, + StorageErrorCodeMaxBlobSizeConditionNotMet, + StorageErrorCodeMetadataTooLarge, + StorageErrorCodeMissingContentLengthHeader, + StorageErrorCodeMissingRequiredHeader, + 
StorageErrorCodeMissingRequiredQueryParameter, + StorageErrorCodeMissingRequiredXMLNode, + StorageErrorCodeMultipleConditionHeadersNotSupported, + StorageErrorCodeNoAuthenticationInformation, + StorageErrorCodeNoPendingCopyOperation, + StorageErrorCodeOperationNotAllowedOnIncrementalCopyBlob, + StorageErrorCodeOperationTimedOut, + StorageErrorCodeOutOfRangeInput, + StorageErrorCodeOutOfRangeQueryParameterValue, + StorageErrorCodePendingCopyOperation, + StorageErrorCodePreviousSnapshotCannotBeNewer, + StorageErrorCodePreviousSnapshotNotFound, + StorageErrorCodePreviousSnapshotOperationNotSupported, + StorageErrorCodeRequestBodyTooLarge, + StorageErrorCodeRequestURLFailedToParse, + StorageErrorCodeResourceAlreadyExists, + StorageErrorCodeResourceNotFound, + StorageErrorCodeResourceTypeMismatch, + StorageErrorCodeSequenceNumberConditionNotMet, + StorageErrorCodeSequenceNumberIncrementTooLarge, + StorageErrorCodeServerBusy, + StorageErrorCodeSnapshotCountExceeded, + StorageErrorCodeSnapshotOperationRateExceeded, + StorageErrorCodeSnapshotsPresent, + StorageErrorCodeSourceConditionNotMet, + StorageErrorCodeSystemInUse, + StorageErrorCodeTargetConditionNotMet, + StorageErrorCodeUnauthorizedBlobOverwrite, + StorageErrorCodeUnsupportedHTTPVerb, + StorageErrorCodeUnsupportedHeader, + StorageErrorCodeUnsupportedQueryParameter, + StorageErrorCodeUnsupportedXMLNode, + } +} + +// ToPtr returns a *StorageErrorCode pointing to the current value. +func (c StorageErrorCode) ToPtr() *StorageErrorCode { + return &c +} + +// BlobDeleteType enum +type BlobDeleteType string + +const ( + BlobDeleteTypeNone BlobDeleteType = "None" + BlobDeleteTypePermanent BlobDeleteType = "Permanent" +) + +// PossibleBlobDeleteTypeValues returns the possible values for the BlobDeleteType const type. +func PossibleBlobDeleteTypeValues() []BlobDeleteType { + return []BlobDeleteType{ + BlobDeleteTypeNone, + BlobDeleteTypePermanent, + } +} + +// ToPtr returns a *BlobDeleteType pointing to the current value. +func (c BlobDeleteType) ToPtr() *BlobDeleteType { + return &c +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_container_client.go new file mode 100644 index 000000000..c9245ce10 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_container_client.go @@ -0,0 +1,1442 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +import ( + "context" + "encoding/xml" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +type containerClient struct { + endpoint string + pl runtime.Pipeline +} + +// newContainerClient creates a new instance of containerClient with the specified values. +// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// pl - the pipeline used for sending requests and handling responses. 
+func newContainerClient(endpoint string, pl runtime.Pipeline) *containerClient { + client := &containerClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// AcquireLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// containerClientAcquireLeaseOptions - containerClientAcquireLeaseOptions contains the optional parameters for the containerClient.AcquireLease +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *containerClient) AcquireLease(ctx context.Context, containerClientAcquireLeaseOptions *containerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientAcquireLeaseResponse, error) { + req, err := client.acquireLeaseCreateRequest(ctx, containerClientAcquireLeaseOptions, modifiedAccessConditions) + if err != nil { + return containerClientAcquireLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientAcquireLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return containerClientAcquireLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.acquireLeaseHandleResponse(resp) +} + +// acquireLeaseCreateRequest creates the AcquireLease request. +func (client *containerClient) acquireLeaseCreateRequest(ctx context.Context, containerClientAcquireLeaseOptions *containerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if containerClientAcquireLeaseOptions != nil && containerClientAcquireLeaseOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientAcquireLeaseOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-lease-action", "acquire") + if containerClientAcquireLeaseOptions != nil && containerClientAcquireLeaseOptions.Duration != nil { + req.Raw().Header.Set("x-ms-lease-duration", strconv.FormatInt(int64(*containerClientAcquireLeaseOptions.Duration), 10)) + } + if containerClientAcquireLeaseOptions != nil && containerClientAcquireLeaseOptions.ProposedLeaseID != nil { + req.Raw().Header.Set("x-ms-proposed-lease-id", *containerClientAcquireLeaseOptions.ProposedLeaseID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if containerClientAcquireLeaseOptions != nil && containerClientAcquireLeaseOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *containerClientAcquireLeaseOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// acquireLeaseHandleResponse handles the AcquireLease response. 
+func (client *containerClient) acquireLeaseHandleResponse(resp *http.Response) (containerClientAcquireLeaseResponse, error) { + result := containerClientAcquireLeaseResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientAcquireLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientAcquireLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// BreakLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// containerClientBreakLeaseOptions - containerClientBreakLeaseOptions contains the optional parameters for the containerClient.BreakLease +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *containerClient) BreakLease(ctx context.Context, containerClientBreakLeaseOptions *containerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientBreakLeaseResponse, error) { + req, err := client.breakLeaseCreateRequest(ctx, containerClientBreakLeaseOptions, modifiedAccessConditions) + if err != nil { + return containerClientBreakLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientBreakLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return containerClientBreakLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.breakLeaseHandleResponse(resp) +} + +// breakLeaseCreateRequest creates the BreakLease request. 
+func (client *containerClient) breakLeaseCreateRequest(ctx context.Context, containerClientBreakLeaseOptions *containerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if containerClientBreakLeaseOptions != nil && containerClientBreakLeaseOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientBreakLeaseOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-lease-action", "break") + if containerClientBreakLeaseOptions != nil && containerClientBreakLeaseOptions.BreakPeriod != nil { + req.Raw().Header.Set("x-ms-lease-break-period", strconv.FormatInt(int64(*containerClientBreakLeaseOptions.BreakPeriod), 10)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if containerClientBreakLeaseOptions != nil && containerClientBreakLeaseOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *containerClientBreakLeaseOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// breakLeaseHandleResponse handles the BreakLease response. +func (client *containerClient) breakLeaseHandleResponse(resp *http.Response) (containerClientBreakLeaseResponse, error) { + result := containerClientBreakLeaseResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientBreakLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-time"); val != "" { + leaseTime32, err := strconv.ParseInt(val, 10, 32) + leaseTime := int32(leaseTime32) + if err != nil { + return containerClientBreakLeaseResponse{}, err + } + result.LeaseTime = &leaseTime + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientBreakLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// ChangeLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// leaseID - Specifies the current lease ID on the resource. +// proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed +// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID +// string formats. 
+// containerClientChangeLeaseOptions - containerClientChangeLeaseOptions contains the optional parameters for the containerClient.ChangeLease +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *containerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, containerClientChangeLeaseOptions *containerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientChangeLeaseResponse, error) { + req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, containerClientChangeLeaseOptions, modifiedAccessConditions) + if err != nil { + return containerClientChangeLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientChangeLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return containerClientChangeLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.changeLeaseHandleResponse(resp) +} + +// changeLeaseCreateRequest creates the ChangeLease request. +func (client *containerClient) changeLeaseCreateRequest(ctx context.Context, leaseID string, proposedLeaseID string, containerClientChangeLeaseOptions *containerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if containerClientChangeLeaseOptions != nil && containerClientChangeLeaseOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientChangeLeaseOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-lease-action", "change") + req.Raw().Header.Set("x-ms-lease-id", leaseID) + req.Raw().Header.Set("x-ms-proposed-lease-id", proposedLeaseID) + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if containerClientChangeLeaseOptions != nil && containerClientChangeLeaseOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *containerClientChangeLeaseOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// changeLeaseHandleResponse handles the ChangeLease response. 
+func (client *containerClient) changeLeaseHandleResponse(resp *http.Response) (containerClientChangeLeaseResponse, error) { + result := containerClientChangeLeaseResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientChangeLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientChangeLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Create - creates a new container under the specified account. If the container with the same name already exists, the operation +// fails +// If the operation fails it returns an *azcore.ResponseError type. +// containerClientCreateOptions - containerClientCreateOptions contains the optional parameters for the containerClient.Create +// method. +// ContainerCpkScopeInfo - ContainerCpkScopeInfo contains a group of parameters for the containerClient.Create method. +func (client *containerClient) Create(ctx context.Context, containerClientCreateOptions *containerClientCreateOptions, containerCpkScopeInfo *ContainerCpkScopeInfo) (containerClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, containerClientCreateOptions, containerCpkScopeInfo) + if err != nil { + return containerClientCreateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientCreateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return containerClientCreateResponse{}, runtime.NewResponseError(resp) + } + return client.createHandleResponse(resp) +} + +// createCreateRequest creates the Create request. 
+func (client *containerClient) createCreateRequest(ctx context.Context, containerClientCreateOptions *containerClientCreateOptions, containerCpkScopeInfo *ContainerCpkScopeInfo) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + if containerClientCreateOptions != nil && containerClientCreateOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientCreateOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if containerClientCreateOptions != nil && containerClientCreateOptions.Metadata != nil { + for k, v := range containerClientCreateOptions.Metadata { + req.Raw().Header.Set("x-ms-meta-"+k, v) + } + } + if containerClientCreateOptions != nil && containerClientCreateOptions.Access != nil { + req.Raw().Header.Set("x-ms-blob-public-access", string(*containerClientCreateOptions.Access)) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if containerClientCreateOptions != nil && containerClientCreateOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *containerClientCreateOptions.RequestID) + } + if containerCpkScopeInfo != nil && containerCpkScopeInfo.DefaultEncryptionScope != nil { + req.Raw().Header.Set("x-ms-default-encryption-scope", *containerCpkScopeInfo.DefaultEncryptionScope) + } + if containerCpkScopeInfo != nil && containerCpkScopeInfo.PreventEncryptionScopeOverride != nil { + req.Raw().Header.Set("x-ms-deny-encryption-scope-override", strconv.FormatBool(*containerCpkScopeInfo.PreventEncryptionScopeOverride)) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// createHandleResponse handles the Create response. +func (client *containerClient) createHandleResponse(resp *http.Response) (containerClientCreateResponse, error) { + result := containerClientCreateResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientCreateResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientCreateResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Delete - operation marks the specified container for deletion. The container and any blobs contained within it are later +// deleted during garbage collection +// If the operation fails it returns an *azcore.ResponseError type. +// containerClientDeleteOptions - containerClientDeleteOptions contains the optional parameters for the containerClient.Delete +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
+func (client *containerClient) Delete(ctx context.Context, containerClientDeleteOptions *containerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientDeleteResponse, error) { + req, err := client.deleteCreateRequest(ctx, containerClientDeleteOptions, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return containerClientDeleteResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientDeleteResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return containerClientDeleteResponse{}, runtime.NewResponseError(resp) + } + return client.deleteHandleResponse(resp) +} + +// deleteCreateRequest creates the Delete request. +func (client *containerClient) deleteCreateRequest(ctx context.Context, containerClientDeleteOptions *containerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodDelete, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + if containerClientDeleteOptions != nil && containerClientDeleteOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientDeleteOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if containerClientDeleteOptions != nil && containerClientDeleteOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *containerClientDeleteOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// deleteHandleResponse handles the Delete response. +func (client *containerClient) deleteHandleResponse(resp *http.Response) (containerClientDeleteResponse, error) { + result := containerClientDeleteResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientDeleteResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// GetAccessPolicy - gets the permissions for the specified container. The permissions indicate whether container data may +// be accessed publicly. +// If the operation fails it returns an *azcore.ResponseError type. +// containerClientGetAccessPolicyOptions - containerClientGetAccessPolicyOptions contains the optional parameters for the +// containerClient.GetAccessPolicy method. 
+// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +func (client *containerClient) GetAccessPolicy(ctx context.Context, containerClientGetAccessPolicyOptions *containerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (containerClientGetAccessPolicyResponse, error) { + req, err := client.getAccessPolicyCreateRequest(ctx, containerClientGetAccessPolicyOptions, leaseAccessConditions) + if err != nil { + return containerClientGetAccessPolicyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientGetAccessPolicyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return containerClientGetAccessPolicyResponse{}, runtime.NewResponseError(resp) + } + return client.getAccessPolicyHandleResponse(resp) +} + +// getAccessPolicyCreateRequest creates the GetAccessPolicy request. +func (client *containerClient) getAccessPolicyCreateRequest(ctx context.Context, containerClientGetAccessPolicyOptions *containerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "acl") + if containerClientGetAccessPolicyOptions != nil && containerClientGetAccessPolicyOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientGetAccessPolicyOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if containerClientGetAccessPolicyOptions != nil && containerClientGetAccessPolicyOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *containerClientGetAccessPolicyOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getAccessPolicyHandleResponse handles the GetAccessPolicy response. 
+func (client *containerClient) getAccessPolicyHandleResponse(resp *http.Response) (containerClientGetAccessPolicyResponse, error) { + result := containerClientGetAccessPolicyResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { + result.BlobPublicAccess = (*PublicAccessType)(&val) + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientGetAccessPolicyResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientGetAccessPolicyResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result); err != nil { + return containerClientGetAccessPolicyResponse{}, err + } + return result, nil +} + +// GetAccountInfo - Returns the sku name and account kind +// If the operation fails it returns an *azcore.ResponseError type. +// options - containerClientGetAccountInfoOptions contains the optional parameters for the containerClient.GetAccountInfo +// method. +func (client *containerClient) GetAccountInfo(ctx context.Context, options *containerClientGetAccountInfoOptions) (containerClientGetAccountInfoResponse, error) { + req, err := client.getAccountInfoCreateRequest(ctx, options) + if err != nil { + return containerClientGetAccountInfoResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientGetAccountInfoResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return containerClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + } + return client.getAccountInfoHandleResponse(resp) +} + +// getAccountInfoCreateRequest creates the GetAccountInfo request. +func (client *containerClient) getAccountInfoCreateRequest(ctx context.Context, options *containerClientGetAccountInfoOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "account") + reqQP.Set("comp", "properties") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getAccountInfoHandleResponse handles the GetAccountInfo response. 
+func (client *containerClient) getAccountInfoHandleResponse(resp *http.Response) (containerClientGetAccountInfoResponse, error) { + result := containerClientGetAccountInfoResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientGetAccountInfoResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-sku-name"); val != "" { + result.SKUName = (*SKUName)(&val) + } + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } + return result, nil +} + +// GetProperties - returns all user-defined metadata and system properties for the specified container. The data returned +// does not include the container's list of blobs +// If the operation fails it returns an *azcore.ResponseError type. +// containerClientGetPropertiesOptions - containerClientGetPropertiesOptions contains the optional parameters for the containerClient.GetProperties +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +func (client *containerClient) GetProperties(ctx context.Context, containerClientGetPropertiesOptions *containerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (containerClientGetPropertiesResponse, error) { + req, err := client.getPropertiesCreateRequest(ctx, containerClientGetPropertiesOptions, leaseAccessConditions) + if err != nil { + return containerClientGetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return containerClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.getPropertiesHandleResponse(resp) +} + +// getPropertiesCreateRequest creates the GetProperties request. +func (client *containerClient) getPropertiesCreateRequest(ctx context.Context, containerClientGetPropertiesOptions *containerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + if containerClientGetPropertiesOptions != nil && containerClientGetPropertiesOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientGetPropertiesOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if containerClientGetPropertiesOptions != nil && containerClientGetPropertiesOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *containerClientGetPropertiesOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. 
+func (client *containerClient) getPropertiesHandleResponse(resp *http.Response) (containerClientGetPropertiesResponse, error) { + result := containerClientGetPropertiesResponse{RawResponse: resp} + for hh := range resp.Header { + if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { + if result.Metadata == nil { + result.Metadata = map[string]string{} + } + result.Metadata[hh[len("x-ms-meta-"):]] = resp.Header.Get(hh) + } + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientGetPropertiesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-duration"); val != "" { + result.LeaseDuration = (*LeaseDurationType)(&val) + } + if val := resp.Header.Get("x-ms-lease-state"); val != "" { + result.LeaseState = (*LeaseStateType)(&val) + } + if val := resp.Header.Get("x-ms-lease-status"); val != "" { + result.LeaseStatus = (*LeaseStatusType)(&val) + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientGetPropertiesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-blob-public-access"); val != "" { + result.BlobPublicAccess = (*PublicAccessType)(&val) + } + if val := resp.Header.Get("x-ms-has-immutability-policy"); val != "" { + hasImmutabilityPolicy, err := strconv.ParseBool(val) + if err != nil { + return containerClientGetPropertiesResponse{}, err + } + result.HasImmutabilityPolicy = &hasImmutabilityPolicy + } + if val := resp.Header.Get("x-ms-has-legal-hold"); val != "" { + hasLegalHold, err := strconv.ParseBool(val) + if err != nil { + return containerClientGetPropertiesResponse{}, err + } + result.HasLegalHold = &hasLegalHold + } + if val := resp.Header.Get("x-ms-default-encryption-scope"); val != "" { + result.DefaultEncryptionScope = &val + } + if val := resp.Header.Get("x-ms-deny-encryption-scope-override"); val != "" { + denyEncryptionScopeOverride, err := strconv.ParseBool(val) + if err != nil { + return containerClientGetPropertiesResponse{}, err + } + result.DenyEncryptionScopeOverride = &denyEncryptionScopeOverride + } + if val := resp.Header.Get("x-ms-immutable-storage-with-versioning-enabled"); val != "" { + isImmutableStorageWithVersioningEnabled, err := strconv.ParseBool(val) + if err != nil { + return containerClientGetPropertiesResponse{}, err + } + result.IsImmutableStorageWithVersioningEnabled = &isImmutableStorageWithVersioningEnabled + } + return result, nil +} + +// ListBlobFlatSegment - [Update] The List Blobs operation returns a list of the blobs under the specified container +// If the operation fails it returns an *azcore.ResponseError type. +// options - containerClientListBlobFlatSegmentOptions contains the optional parameters for the containerClient.ListBlobFlatSegment +// method. 
+func (client *containerClient) ListBlobFlatSegment(options *containerClientListBlobFlatSegmentOptions) *containerClientListBlobFlatSegmentPager { + return &containerClientListBlobFlatSegmentPager{ + client: client, + requester: func(ctx context.Context) (*policy.Request, error) { + return client.listBlobFlatSegmentCreateRequest(ctx, options) + }, + advancer: func(ctx context.Context, resp containerClientListBlobFlatSegmentResponse) (*policy.Request, error) { + return runtime.NewRequest(ctx, http.MethodGet, *resp.ListBlobsFlatSegmentResponse.NextMarker) + }, + } +} + +// listBlobFlatSegmentCreateRequest creates the ListBlobFlatSegment request. +func (client *containerClient) listBlobFlatSegmentCreateRequest(ctx context.Context, options *containerClientListBlobFlatSegmentOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "list") + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// listBlobFlatSegmentHandleResponse handles the ListBlobFlatSegment response. +func (client *containerClient) listBlobFlatSegmentHandleResponse(resp *http.Response) (containerClientListBlobFlatSegmentResponse, error) { + result := containerClientListBlobFlatSegmentResponse{RawResponse: resp} + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientListBlobFlatSegmentResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsFlatSegmentResponse); err != nil { + return containerClientListBlobFlatSegmentResponse{}, err + } + return result, nil +} + +// ListBlobHierarchySegment - [Update] The List Blobs operation returns a list of the blobs under the specified container +// If the operation fails it returns an *azcore.ResponseError type. +// delimiter - When the request includes this parameter, the operation returns a BlobPrefix element in the response body that +// acts as a placeholder for all blobs whose names begin with the same substring up to the +// appearance of the delimiter character. The delimiter may be a single character or a string. 
+// options - containerClientListBlobHierarchySegmentOptions contains the optional parameters for the containerClient.ListBlobHierarchySegment +// method. +func (client *containerClient) ListBlobHierarchySegment(delimiter string, options *containerClientListBlobHierarchySegmentOptions) *containerClientListBlobHierarchySegmentPager { + return &containerClientListBlobHierarchySegmentPager{ + client: client, + requester: func(ctx context.Context) (*policy.Request, error) { + return client.listBlobHierarchySegmentCreateRequest(ctx, delimiter, options) + }, + advancer: func(ctx context.Context, resp containerClientListBlobHierarchySegmentResponse) (*policy.Request, error) { + return runtime.NewRequest(ctx, http.MethodGet, *resp.ListBlobsHierarchySegmentResponse.NextMarker) + }, + } +} + +// listBlobHierarchySegmentCreateRequest creates the ListBlobHierarchySegment request. +func (client *containerClient) listBlobHierarchySegmentCreateRequest(ctx context.Context, delimiter string, options *containerClientListBlobHierarchySegmentOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "list") + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) + } + reqQP.Set("delimiter", delimiter) + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// listBlobHierarchySegmentHandleResponse handles the ListBlobHierarchySegment response. +func (client *containerClient) listBlobHierarchySegmentHandleResponse(resp *http.Response) (containerClientListBlobHierarchySegmentResponse, error) { + result := containerClientListBlobHierarchySegmentResponse{RawResponse: resp} + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientListBlobHierarchySegmentResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsHierarchySegmentResponse); err != nil { + return containerClientListBlobHierarchySegmentResponse{}, err + } + return result, nil +} + +// ReleaseLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. 
+// leaseID - Specifies the current lease ID on the resource. +// containerClientReleaseLeaseOptions - containerClientReleaseLeaseOptions contains the optional parameters for the containerClient.ReleaseLease +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *containerClient) ReleaseLease(ctx context.Context, leaseID string, containerClientReleaseLeaseOptions *containerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientReleaseLeaseResponse, error) { + req, err := client.releaseLeaseCreateRequest(ctx, leaseID, containerClientReleaseLeaseOptions, modifiedAccessConditions) + if err != nil { + return containerClientReleaseLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientReleaseLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return containerClientReleaseLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.releaseLeaseHandleResponse(resp) +} + +// releaseLeaseCreateRequest creates the ReleaseLease request. +func (client *containerClient) releaseLeaseCreateRequest(ctx context.Context, leaseID string, containerClientReleaseLeaseOptions *containerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if containerClientReleaseLeaseOptions != nil && containerClientReleaseLeaseOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientReleaseLeaseOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-lease-action", "release") + req.Raw().Header.Set("x-ms-lease-id", leaseID) + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if containerClientReleaseLeaseOptions != nil && containerClientReleaseLeaseOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *containerClientReleaseLeaseOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// releaseLeaseHandleResponse handles the ReleaseLease response. 
+func (client *containerClient) releaseLeaseHandleResponse(resp *http.Response) (containerClientReleaseLeaseResponse, error) { + result := containerClientReleaseLeaseResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientReleaseLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientReleaseLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Rename - Renames an existing container. +// If the operation fails it returns an *azcore.ResponseError type. +// sourceContainerName - Required. Specifies the name of the container to rename. +// options - containerClientRenameOptions contains the optional parameters for the containerClient.Rename method. +func (client *containerClient) Rename(ctx context.Context, sourceContainerName string, options *containerClientRenameOptions) (containerClientRenameResponse, error) { + req, err := client.renameCreateRequest(ctx, sourceContainerName, options) + if err != nil { + return containerClientRenameResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientRenameResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return containerClientRenameResponse{}, runtime.NewResponseError(resp) + } + return client.renameHandleResponse(resp) +} + +// renameCreateRequest creates the Rename request. +func (client *containerClient) renameCreateRequest(ctx context.Context, sourceContainerName string, options *containerClientRenameOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "rename") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("x-ms-source-container-name", sourceContainerName) + if options != nil && options.SourceLeaseID != nil { + req.Raw().Header.Set("x-ms-source-lease-id", *options.SourceLeaseID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// renameHandleResponse handles the Rename response. 
+func (client *containerClient) renameHandleResponse(resp *http.Response) (containerClientRenameResponse, error) { + result := containerClientRenameResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientRenameResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// RenewLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 +// to 60 seconds, or can be infinite +// If the operation fails it returns an *azcore.ResponseError type. +// leaseID - Specifies the current lease ID on the resource. +// containerClientRenewLeaseOptions - containerClientRenewLeaseOptions contains the optional parameters for the containerClient.RenewLease +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *containerClient) RenewLease(ctx context.Context, leaseID string, containerClientRenewLeaseOptions *containerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientRenewLeaseResponse, error) { + req, err := client.renewLeaseCreateRequest(ctx, leaseID, containerClientRenewLeaseOptions, modifiedAccessConditions) + if err != nil { + return containerClientRenewLeaseResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientRenewLeaseResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return containerClientRenewLeaseResponse{}, runtime.NewResponseError(resp) + } + return client.renewLeaseHandleResponse(resp) +} + +// renewLeaseCreateRequest creates the RenewLease request. 
+func (client *containerClient) renewLeaseCreateRequest(ctx context.Context, leaseID string, containerClientRenewLeaseOptions *containerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "lease") + reqQP.Set("restype", "container") + if containerClientRenewLeaseOptions != nil && containerClientRenewLeaseOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientRenewLeaseOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-lease-action", "renew") + req.Raw().Header.Set("x-ms-lease-id", leaseID) + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if containerClientRenewLeaseOptions != nil && containerClientRenewLeaseOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *containerClientRenewLeaseOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// renewLeaseHandleResponse handles the RenewLease response. +func (client *containerClient) renewLeaseHandleResponse(resp *http.Response) (containerClientRenewLeaseResponse, error) { + result := containerClientRenewLeaseResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientRenewLeaseResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-lease-id"); val != "" { + result.LeaseID = &val + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientRenewLeaseResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// Restore - Restores a previously-deleted container. +// If the operation fails it returns an *azcore.ResponseError type. +// options - containerClientRestoreOptions contains the optional parameters for the containerClient.Restore method. +func (client *containerClient) Restore(ctx context.Context, options *containerClientRestoreOptions) (containerClientRestoreResponse, error) { + req, err := client.restoreCreateRequest(ctx, options) + if err != nil { + return containerClientRestoreResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientRestoreResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return containerClientRestoreResponse{}, runtime.NewResponseError(resp) + } + return client.restoreHandleResponse(resp) +} + +// restoreCreateRequest creates the Restore request. 
+func (client *containerClient) restoreCreateRequest(ctx context.Context, options *containerClientRestoreOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "undelete") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + if options != nil && options.DeletedContainerName != nil { + req.Raw().Header.Set("x-ms-deleted-container-name", *options.DeletedContainerName) + } + if options != nil && options.DeletedContainerVersion != nil { + req.Raw().Header.Set("x-ms-deleted-container-version", *options.DeletedContainerVersion) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// restoreHandleResponse handles the Restore response. +func (client *containerClient) restoreHandleResponse(resp *http.Response) (containerClientRestoreResponse, error) { + result := containerClientRestoreResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientRestoreResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetAccessPolicy - sets the permissions for the specified container. The permissions indicate whether blobs in a container +// may be accessed publicly. +// If the operation fails it returns an *azcore.ResponseError type. +// containerClientSetAccessPolicyOptions - containerClientSetAccessPolicyOptions contains the optional parameters for the +// containerClient.SetAccessPolicy method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *containerClient) SetAccessPolicy(ctx context.Context, containerClientSetAccessPolicyOptions *containerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientSetAccessPolicyResponse, error) { + req, err := client.setAccessPolicyCreateRequest(ctx, containerClientSetAccessPolicyOptions, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return containerClientSetAccessPolicyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientSetAccessPolicyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return containerClientSetAccessPolicyResponse{}, runtime.NewResponseError(resp) + } + return client.setAccessPolicyHandleResponse(resp) +} + +// setAccessPolicyCreateRequest creates the SetAccessPolicy request. 
+func (client *containerClient) setAccessPolicyCreateRequest(ctx context.Context, containerClientSetAccessPolicyOptions *containerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "acl") + if containerClientSetAccessPolicyOptions != nil && containerClientSetAccessPolicyOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientSetAccessPolicyOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if containerClientSetAccessPolicyOptions != nil && containerClientSetAccessPolicyOptions.Access != nil { + req.Raw().Header.Set("x-ms-blob-public-access", string(*containerClientSetAccessPolicyOptions.Access)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if containerClientSetAccessPolicyOptions != nil && containerClientSetAccessPolicyOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *containerClientSetAccessPolicyOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + type wrapper struct { + XMLName xml.Name `xml:"SignedIdentifiers"` + ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"` + } + if containerClientSetAccessPolicyOptions != nil && containerClientSetAccessPolicyOptions.ContainerACL != nil { + return req, runtime.MarshalAsXML(req, wrapper{ContainerACL: &containerClientSetAccessPolicyOptions.ContainerACL}) + } + return req, nil +} + +// setAccessPolicyHandleResponse handles the SetAccessPolicy response. +func (client *containerClient) setAccessPolicyHandleResponse(resp *http.Response) (containerClientSetAccessPolicyResponse, error) { + result := containerClientSetAccessPolicyResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientSetAccessPolicyResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientSetAccessPolicyResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SetMetadata - operation sets one or more user-defined name-value pairs for the specified container. +// If the operation fails it returns an *azcore.ResponseError type. 
+// containerClientSetMetadataOptions - containerClientSetMetadataOptions contains the optional parameters for the containerClient.SetMetadata +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *containerClient) SetMetadata(ctx context.Context, containerClientSetMetadataOptions *containerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (containerClientSetMetadataResponse, error) { + req, err := client.setMetadataCreateRequest(ctx, containerClientSetMetadataOptions, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return containerClientSetMetadataResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientSetMetadataResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return containerClientSetMetadataResponse{}, runtime.NewResponseError(resp) + } + return client.setMetadataHandleResponse(resp) +} + +// setMetadataCreateRequest creates the SetMetadata request. +func (client *containerClient) setMetadataCreateRequest(ctx context.Context, containerClientSetMetadataOptions *containerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "metadata") + if containerClientSetMetadataOptions != nil && containerClientSetMetadataOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*containerClientSetMetadataOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if containerClientSetMetadataOptions != nil && containerClientSetMetadataOptions.Metadata != nil { + for k, v := range containerClientSetMetadataOptions.Metadata { + req.Raw().Header.Set("x-ms-meta-"+k, v) + } + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if containerClientSetMetadataOptions != nil && containerClientSetMetadataOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *containerClientSetMetadataOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// setMetadataHandleResponse handles the SetMetadata response. 
+func (client *containerClient) setMetadataHandleResponse(resp *http.Response) (containerClientSetMetadataResponse, error) { + result := containerClientSetMetadataResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientSetMetadataResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return containerClientSetMetadataResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. +// If the operation fails it returns an *azcore.ResponseError type. +// contentLength - The length of the request. +// multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header +// value: multipart/mixed; boundary=batch_ +// body - Initial data +// options - containerClientSubmitBatchOptions contains the optional parameters for the containerClient.SubmitBatch method. +func (client *containerClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *containerClientSubmitBatchOptions) (containerClientSubmitBatchResponse, error) { + req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) + if err != nil { + return containerClientSubmitBatchResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return containerClientSubmitBatchResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return containerClientSubmitBatchResponse{}, runtime.NewResponseError(resp) + } + return client.submitBatchHandleResponse(resp) +} + +// submitBatchCreateRequest creates the SubmitBatch request. +func (client *containerClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *containerClientSubmitBatchOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "batch") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + req.Raw().Header.Set("Content-Type", multipartContentType) + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, runtime.MarshalAsXML(req, body) +} + +// submitBatchHandleResponse handles the SubmitBatch response. 
+func (client *containerClient) submitBatchHandleResponse(resp *http.Response) (containerClientSubmitBatchResponse, error) { + result := containerClientSubmitBatchResponse{RawResponse: resp} + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_models.go new file mode 100644 index 000000000..d40d63b1b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_models.go @@ -0,0 +1,2158 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +import ( + "encoding/xml" + "time" +) + +// AccessPolicy - An Access policy +type AccessPolicy struct { + // the date-time the policy expires + Expiry *time.Time `xml:"Expiry"` + + // the permissions for the acl policy + Permission *string `xml:"Permission"` + + // the date-time the policy is active + Start *time.Time `xml:"Start"` +} + +// MarshalXML implements the xml.Marshaller interface for type AccessPolicy. +func (a AccessPolicy) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias AccessPolicy + aux := &struct { + *alias + Expiry *timeRFC3339 `xml:"Expiry"` + Start *timeRFC3339 `xml:"Start"` + }{ + alias: (*alias)(&a), + Expiry: (*timeRFC3339)(a.Expiry), + Start: (*timeRFC3339)(a.Start), + } + return e.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type AccessPolicy. +func (a *AccessPolicy) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias AccessPolicy + aux := &struct { + *alias + Expiry *timeRFC3339 `xml:"Expiry"` + Start *timeRFC3339 `xml:"Start"` + }{ + alias: (*alias)(a), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + a.Expiry = (*time.Time)(aux.Expiry) + a.Start = (*time.Time)(aux.Start) + return nil +} + +// AppendPositionAccessConditions contains a group of parameters for the appendBlobClient.AppendBlock method. +type AppendPositionAccessConditions struct { + // Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare. + // Append Block will succeed only if the append position is equal to this number. If + // it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed). + AppendPosition *int64 + // Optional conditional header. The max length in bytes permitted for the append blob. If the Append Block operation would + // cause the blob to exceed that limit or if the blob size is already greater than + // the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 - + // Precondition Failed). + MaxSize *int64 +} + +// ArrowConfiguration - Groups the settings used for formatting the response if the response should be Arrow formatted. 
+type ArrowConfiguration struct { + // REQUIRED + Schema []*ArrowField `xml:"Schema>Field"` +} + +// MarshalXML implements the xml.Marshaller interface for type ArrowConfiguration. +func (a ArrowConfiguration) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias ArrowConfiguration + aux := &struct { + *alias + Schema *[]*ArrowField `xml:"Schema>Field"` + }{ + alias: (*alias)(&a), + } + if a.Schema != nil { + aux.Schema = &a.Schema + } + return e.EncodeElement(aux, start) +} + +// ArrowField - Groups settings regarding specific field of an arrow schema +type ArrowField struct { + // REQUIRED + Type *string `xml:"Type"` + Name *string `xml:"Name"` + Precision *int32 `xml:"Precision"` + Scale *int32 `xml:"Scale"` +} + +// BlobFlatListSegment struct +type BlobFlatListSegment struct { + // REQUIRED + BlobItems []*BlobItemInternal `xml:"Blob"` +} + +// MarshalXML implements the xml.Marshaller interface for type BlobFlatListSegment. +func (b BlobFlatListSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias BlobFlatListSegment + aux := &struct { + *alias + BlobItems *[]*BlobItemInternal `xml:"Blob"` + }{ + alias: (*alias)(&b), + } + if b.BlobItems != nil { + aux.BlobItems = &b.BlobItems + } + return e.EncodeElement(aux, start) +} + +// BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. +type BlobHTTPHeaders struct { + // Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request. + BlobCacheControl *string + // Optional. Sets the blob's Content-Disposition header. + BlobContentDisposition *string + // Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read + // request. + BlobContentEncoding *string + // Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read + // request. + BlobContentLanguage *string + // Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks + // were validated when each was uploaded. + BlobContentMD5 []byte + // Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request. + BlobContentType *string +} + +// BlobHierarchyListSegment struct +type BlobHierarchyListSegment struct { + // REQUIRED + BlobItems []*BlobItemInternal `xml:"Blob"` + BlobPrefixes []*BlobPrefix `xml:"BlobPrefix"` +} + +// MarshalXML implements the xml.Marshaller interface for type BlobHierarchyListSegment. 
+func (b BlobHierarchyListSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias BlobHierarchyListSegment + aux := &struct { + *alias + BlobItems *[]*BlobItemInternal `xml:"Blob"` + BlobPrefixes *[]*BlobPrefix `xml:"BlobPrefix"` + }{ + alias: (*alias)(&b), + } + if b.BlobItems != nil { + aux.BlobItems = &b.BlobItems + } + if b.BlobPrefixes != nil { + aux.BlobPrefixes = &b.BlobPrefixes + } + return e.EncodeElement(aux, start) +} + +// BlobItemInternal - An Azure Storage blob +type BlobItemInternal struct { + // REQUIRED + Deleted *bool `xml:"Deleted"` + + // REQUIRED + Name *string `xml:"Name"` + + // REQUIRED; Properties of a blob + Properties *BlobPropertiesInternal `xml:"Properties"` + + // REQUIRED + Snapshot *string `xml:"Snapshot"` + + // Blob tags + BlobTags *BlobTags `xml:"Tags"` + HasVersionsOnly *bool `xml:"HasVersionsOnly"` + IsCurrentVersion *bool `xml:"IsCurrentVersion"` + + // Dictionary of + Metadata map[string]*string `xml:"Metadata"` + + // Dictionary of + OrMetadata map[string]*string `xml:"OrMetadata"` + VersionID *string `xml:"VersionId"` +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItemInternal. +func (b *BlobItemInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias BlobItemInternal + aux := &struct { + *alias + Metadata additionalProperties `xml:"Metadata"` + OrMetadata additionalProperties `xml:"OrMetadata"` + }{ + alias: (*alias)(b), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + b.Metadata = (map[string]*string)(aux.Metadata) + b.OrMetadata = (map[string]*string)(aux.OrMetadata) + return nil +} + +// BlobPrefix struct +type BlobPrefix struct { + // REQUIRED + Name *string `xml:"Name"` +} + +// BlobPropertiesInternal - Properties of a blob +type BlobPropertiesInternal struct { + // REQUIRED + Etag *string `xml:"Etag"` + + // REQUIRED + LastModified *time.Time `xml:"Last-Modified"` + AccessTier *AccessTier `xml:"AccessTier"` + AccessTierChangeTime *time.Time `xml:"AccessTierChangeTime"` + AccessTierInferred *bool `xml:"AccessTierInferred"` + ArchiveStatus *ArchiveStatus `xml:"ArchiveStatus"` + BlobSequenceNumber *int64 `xml:"x-ms-blob-sequence-number"` + BlobType *BlobType `xml:"BlobType"` + CacheControl *string `xml:"Cache-Control"` + ContentDisposition *string `xml:"Content-Disposition"` + ContentEncoding *string `xml:"Content-Encoding"` + ContentLanguage *string `xml:"Content-Language"` + + // Size in bytes + ContentLength *int64 `xml:"Content-Length"` + ContentMD5 []byte `xml:"Content-MD5"` + ContentType *string `xml:"Content-Type"` + CopyCompletionTime *time.Time `xml:"CopyCompletionTime"` + CopyID *string `xml:"CopyId"` + CopyProgress *string `xml:"CopyProgress"` + CopySource *string `xml:"CopySource"` + CopyStatus *CopyStatusType `xml:"CopyStatus"` + CopyStatusDescription *string `xml:"CopyStatusDescription"` + CreationTime *time.Time `xml:"Creation-Time"` + CustomerProvidedKeySHA256 *string `xml:"CustomerProvidedKeySha256"` + DeletedTime *time.Time `xml:"DeletedTime"` + DestinationSnapshot *string `xml:"DestinationSnapshot"` + + // The name of the encryption scope under which the blob is encrypted. 
+ EncryptionScope *string `xml:"EncryptionScope"` + ExpiresOn *time.Time `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *time.Time `xml:"ImmutabilityPolicyUntilDate"` + ImmutabilityPolicyMode *BlobImmutabilityPolicyMode `xml:"ImmutabilityPolicyMode"` + IncrementalCopy *bool `xml:"IncrementalCopy"` + IsSealed *bool `xml:"Sealed"` + LastAccessedOn *time.Time `xml:"LastAccessTime"` + LeaseDuration *LeaseDurationType `xml:"LeaseDuration"` + LeaseState *LeaseStateType `xml:"LeaseState"` + LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` + LegalHold *bool `xml:"LegalHold"` + + // If an object is in rehydrate pending state then this header is returned with priority of rehydrate. Valid values are High + // and Standard. + RehydratePriority *RehydratePriority `xml:"RehydratePriority"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` + ServerEncrypted *bool `xml:"ServerEncrypted"` + TagCount *int32 `xml:"TagCount"` +} + +// MarshalXML implements the xml.Marshaller interface for type BlobPropertiesInternal. +func (b BlobPropertiesInternal) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias BlobPropertiesInternal + aux := &struct { + *alias + AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *[]byte `xml:"Content-MD5"` + CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` + CreationTime *timeRFC1123 `xml:"Creation-Time"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(&b), + AccessTierChangeTime: (*timeRFC1123)(b.AccessTierChangeTime), + CopyCompletionTime: (*timeRFC1123)(b.CopyCompletionTime), + CreationTime: (*timeRFC1123)(b.CreationTime), + DeletedTime: (*timeRFC1123)(b.DeletedTime), + ExpiresOn: (*timeRFC1123)(b.ExpiresOn), + ImmutabilityPolicyExpiresOn: (*timeRFC1123)(b.ImmutabilityPolicyExpiresOn), + LastAccessedOn: (*timeRFC1123)(b.LastAccessedOn), + LastModified: (*timeRFC1123)(b.LastModified), + } + if b.ContentMD5 != nil { + aux.ContentMD5 = &b.ContentMD5 + } + return e.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type BlobPropertiesInternal. 
+func (b *BlobPropertiesInternal) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias BlobPropertiesInternal + aux := &struct { + *alias + AccessTierChangeTime *timeRFC1123 `xml:"AccessTierChangeTime"` + ContentMD5 *[]byte `xml:"Content-MD5"` + CopyCompletionTime *timeRFC1123 `xml:"CopyCompletionTime"` + CreationTime *timeRFC1123 `xml:"Creation-Time"` + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + ExpiresOn *timeRFC1123 `xml:"Expiry-Time"` + ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"` + LastAccessedOn *timeRFC1123 `xml:"LastAccessTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(b), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + b.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime) + b.CopyCompletionTime = (*time.Time)(aux.CopyCompletionTime) + b.CreationTime = (*time.Time)(aux.CreationTime) + b.DeletedTime = (*time.Time)(aux.DeletedTime) + b.ExpiresOn = (*time.Time)(aux.ExpiresOn) + b.ImmutabilityPolicyExpiresOn = (*time.Time)(aux.ImmutabilityPolicyExpiresOn) + b.LastAccessedOn = (*time.Time)(aux.LastAccessedOn) + b.LastModified = (*time.Time)(aux.LastModified) + return nil +} + +// BlobTag struct +type BlobTag struct { + // REQUIRED + Key *string `xml:"Key"` + + // REQUIRED + Value *string `xml:"Value"` +} + +// BlobTags - Blob tags +type BlobTags struct { + // REQUIRED + BlobTagSet []*BlobTag `xml:"TagSet>Tag"` +} + +// MarshalXML implements the xml.Marshaller interface for type BlobTags. +func (b BlobTags) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + start.Name.Local = "Tags" + type alias BlobTags + aux := &struct { + *alias + BlobTagSet *[]*BlobTag `xml:"TagSet>Tag"` + }{ + alias: (*alias)(&b), + } + if b.BlobTagSet != nil { + aux.BlobTagSet = &b.BlobTagSet + } + return e.EncodeElement(aux, start) +} + +// Block - Represents a single block in a block blob. It describes the block's ID and size. +type Block struct { + // REQUIRED; The base64 encoded block ID. + Name *string `xml:"Name"` + + // REQUIRED; The block size in bytes. + Size *int64 `xml:"Size"` +} + +// BlockList struct +type BlockList struct { + CommittedBlocks []*Block `xml:"CommittedBlocks>Block"` + UncommittedBlocks []*Block `xml:"UncommittedBlocks>Block"` +} + +// MarshalXML implements the xml.Marshaller interface for type BlockList. +func (b BlockList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias BlockList + aux := &struct { + *alias + CommittedBlocks *[]*Block `xml:"CommittedBlocks>Block"` + UncommittedBlocks *[]*Block `xml:"UncommittedBlocks>Block"` + }{ + alias: (*alias)(&b), + } + if b.CommittedBlocks != nil { + aux.CommittedBlocks = &b.CommittedBlocks + } + if b.UncommittedBlocks != nil { + aux.UncommittedBlocks = &b.UncommittedBlocks + } + return e.EncodeElement(aux, start) +} + +// BlockLookupList struct +type BlockLookupList struct { + Committed []*string `xml:"Committed"` + Latest []*string `xml:"Latest"` + Uncommitted []*string `xml:"Uncommitted"` +} + +// MarshalXML implements the xml.Marshaller interface for type BlockLookupList. 
+func (b BlockLookupList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + start.Name.Local = "BlockList" + type alias BlockLookupList + aux := &struct { + *alias + Committed *[]*string `xml:"Committed"` + Latest *[]*string `xml:"Latest"` + Uncommitted *[]*string `xml:"Uncommitted"` + }{ + alias: (*alias)(&b), + } + if b.Committed != nil { + aux.Committed = &b.Committed + } + if b.Latest != nil { + aux.Latest = &b.Latest + } + if b.Uncommitted != nil { + aux.Uncommitted = &b.Uncommitted + } + return e.EncodeElement(aux, start) +} + +// ClearRange enum +type ClearRange struct { + // REQUIRED + End *int64 `xml:"End"` + + // REQUIRED + Start *int64 `xml:"Start"` +} + +// ContainerCpkScopeInfo contains a group of parameters for the containerClient.Create method. +type ContainerCpkScopeInfo struct { + // Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all + // future writes. + DefaultEncryptionScope *string + // Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than + // the scope set on the container. + PreventEncryptionScopeOverride *bool +} + +// ContainerItem - An Azure Storage container +type ContainerItem struct { + // REQUIRED + Name *string `xml:"Name"` + + // REQUIRED; Properties of a container + Properties *ContainerProperties `xml:"Properties"` + Deleted *bool `xml:"Deleted"` + + // Dictionary of + Metadata map[string]*string `xml:"Metadata"` + Version *string `xml:"Version"` +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type ContainerItem. +func (c *ContainerItem) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias ContainerItem + aux := &struct { + *alias + Metadata additionalProperties `xml:"Metadata"` + }{ + alias: (*alias)(c), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + c.Metadata = (map[string]*string)(aux.Metadata) + return nil +} + +// ContainerProperties - Properties of a container +type ContainerProperties struct { + // REQUIRED + Etag *string `xml:"Etag"` + + // REQUIRED + LastModified *time.Time `xml:"Last-Modified"` + DefaultEncryptionScope *string `xml:"DefaultEncryptionScope"` + DeletedTime *time.Time `xml:"DeletedTime"` + HasImmutabilityPolicy *bool `xml:"HasImmutabilityPolicy"` + HasLegalHold *bool `xml:"HasLegalHold"` + + // Indicates if version level worm is enabled on this container. + IsImmutableStorageWithVersioningEnabled *bool `xml:"ImmutableStorageWithVersioningEnabled"` + LeaseDuration *LeaseDurationType `xml:"LeaseDuration"` + LeaseState *LeaseStateType `xml:"LeaseState"` + LeaseStatus *LeaseStatusType `xml:"LeaseStatus"` + PreventEncryptionScopeOverride *bool `xml:"DenyEncryptionScopeOverride"` + PublicAccess *PublicAccessType `xml:"PublicAccess"` + RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"` +} + +// MarshalXML implements the xml.Marshaller interface for type ContainerProperties. +func (c ContainerProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias ContainerProperties + aux := &struct { + *alias + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(&c), + DeletedTime: (*timeRFC1123)(c.DeletedTime), + LastModified: (*timeRFC1123)(c.LastModified), + } + return e.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type ContainerProperties. 
+func (c *ContainerProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias ContainerProperties + aux := &struct { + *alias + DeletedTime *timeRFC1123 `xml:"DeletedTime"` + LastModified *timeRFC1123 `xml:"Last-Modified"` + }{ + alias: (*alias)(c), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + c.DeletedTime = (*time.Time)(aux.DeletedTime) + c.LastModified = (*time.Time)(aux.LastModified) + return nil +} + +// CorsRule - CORS is an HTTP feature that enables a web application running under one domain to access resources in another +// domain. Web browsers implement a security restriction known as same-origin policy that +// prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin +// domain) to call APIs in another domain +type CorsRule struct { + // REQUIRED; the request headers that the origin domain may specify on the CORS request. + AllowedHeaders *string `xml:"AllowedHeaders"` + + // REQUIRED; The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma separated) + AllowedMethods *string `xml:"AllowedMethods"` + + // REQUIRED; The origin domains that are permitted to make a request against the storage service via CORS. The origin domain + // is the domain from which the request originates. Note that the origin must be an exact + // case-sensitive match with the origin that the user age sends to the service. You can also use the wildcard character '*' + // to allow all origin domains to make requests via CORS. + AllowedOrigins *string `xml:"AllowedOrigins"` + + // REQUIRED; The response headers that may be sent in the response to the CORS request and exposed by the browser to the request + // issuer + ExposedHeaders *string `xml:"ExposedHeaders"` + + // REQUIRED; The maximum amount time that a browser should cache the preflight OPTIONS request. + MaxAgeInSeconds *int32 `xml:"MaxAgeInSeconds"` +} + +// CpkInfo contains a group of parameters for the blobClient.Download method. +type CpkInfo struct { + // The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided + // if the x-ms-encryption-key header is provided. + EncryptionAlgorithm *EncryptionAlgorithmType + // Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption + // is performed with the root account encryption key. For more information, see + // Encryption at Rest for Azure Storage Services. + EncryptionKey *string + // The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided. + EncryptionKeySHA256 *string +} + +// CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +type CpkScopeInfo struct { + // Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided + // in the request. If not specified, encryption is performed with the default + // account encryption scope. For more information, see Encryption at Rest for Azure Storage Services. + EncryptionScope *string +} + +// DelimitedTextConfiguration - Groups the settings used for interpreting the blob data if the blob is delimited text formatted. +type DelimitedTextConfiguration struct { + // The string used to separate columns. + ColumnSeparator *string `xml:"ColumnSeparator"` + + // The string used as an escape character. 
+ EscapeChar *string `xml:"EscapeChar"` + + // The string used to quote a specific field. + FieldQuote *string `xml:"FieldQuote"` + + // Represents whether the data has headers. + HeadersPresent *bool `xml:"HasHeaders"` + + // The string used to separate records. + RecordSeparator *string `xml:"RecordSeparator"` +} + +// FilterBlobItem - Blob info from a Filter Blobs API call +type FilterBlobItem struct { + // REQUIRED + ContainerName *string `xml:"ContainerName"` + + // REQUIRED + Name *string `xml:"Name"` + + // Blob tags + Tags *BlobTags `xml:"Tags"` +} + +// FilterBlobSegment - The result of a Filter Blobs API call +type FilterBlobSegment struct { + // REQUIRED + Blobs []*FilterBlobItem `xml:"Blobs>Blob"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + + // REQUIRED + Where *string `xml:"Where"` + NextMarker *string `xml:"NextMarker"` +} + +// MarshalXML implements the xml.Marshaller interface for type FilterBlobSegment. +func (f FilterBlobSegment) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias FilterBlobSegment + aux := &struct { + *alias + Blobs *[]*FilterBlobItem `xml:"Blobs>Blob"` + }{ + alias: (*alias)(&f), + } + if f.Blobs != nil { + aux.Blobs = &f.Blobs + } + return e.EncodeElement(aux, start) +} + +// GeoReplication - Geo-Replication information for the Secondary Storage Service +type GeoReplication struct { + // REQUIRED; A GMT date/time value, to the second. All primary writes preceding this value are guaranteed to be available + // for read operations at the secondary. Primary writes after this point in time may or may + // not be available for reads. + LastSyncTime *time.Time `xml:"LastSyncTime"` + + // REQUIRED; The status of the secondary location + Status *BlobGeoReplicationStatus `xml:"Status"` +} + +// MarshalXML implements the xml.Marshaller interface for type GeoReplication. +func (g GeoReplication) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias GeoReplication + aux := &struct { + *alias + LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` + }{ + alias: (*alias)(&g), + LastSyncTime: (*timeRFC1123)(g.LastSyncTime), + } + return e.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type GeoReplication. +func (g *GeoReplication) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias GeoReplication + aux := &struct { + *alias + LastSyncTime *timeRFC1123 `xml:"LastSyncTime"` + }{ + alias: (*alias)(g), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + g.LastSyncTime = (*time.Time)(aux.LastSyncTime) + return nil +} + +// JSONTextConfiguration - json text configuration +type JSONTextConfiguration struct { + // The string used to separate records. + RecordSeparator *string `xml:"RecordSeparator"` +} + +// KeyInfo - Key information +type KeyInfo struct { + // REQUIRED; The date-time the key expires in ISO 8601 UTC time + Expiry *string `xml:"Expiry"` + + // REQUIRED; The date-time the key is active in ISO 8601 UTC time + Start *string `xml:"Start"` +} + +// LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +type LeaseAccessConditions struct { + // If specified, the operation only succeeds if the resource's lease is active and matches this ID. 
+ LeaseID *string +} + +// ListBlobsFlatSegmentResponse - An enumeration of blobs +type ListBlobsFlatSegmentResponse struct { + // REQUIRED + ContainerName *string `xml:"ContainerName,attr"` + + // REQUIRED + Segment *BlobFlatListSegment `xml:"Blobs"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// ListBlobsHierarchySegmentResponse - An enumeration of blobs +type ListBlobsHierarchySegmentResponse struct { + // REQUIRED + ContainerName *string `xml:"ContainerName,attr"` + + // REQUIRED + Segment *BlobHierarchyListSegment `xml:"Blobs"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Delimiter *string `xml:"Delimiter"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// ListContainersSegmentResponse - An enumeration of containers +type ListContainersSegmentResponse struct { + // REQUIRED + ContainerItems []*ContainerItem `xml:"Containers>Container"` + + // REQUIRED + ServiceEndpoint *string `xml:"ServiceEndpoint,attr"` + Marker *string `xml:"Marker"` + MaxResults *int32 `xml:"MaxResults"` + NextMarker *string `xml:"NextMarker"` + Prefix *string `xml:"Prefix"` +} + +// MarshalXML implements the xml.Marshaller interface for type ListContainersSegmentResponse. +func (l ListContainersSegmentResponse) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias ListContainersSegmentResponse + aux := &struct { + *alias + ContainerItems *[]*ContainerItem `xml:"Containers>Container"` + }{ + alias: (*alias)(&l), + } + if l.ContainerItems != nil { + aux.ContainerItems = &l.ContainerItems + } + return e.EncodeElement(aux, start) +} + +// Logging - Azure Analytics Logging settings. +type Logging struct { + // REQUIRED; Indicates whether all delete requests should be logged. + Delete *bool `xml:"Delete"` + + // REQUIRED; Indicates whether all read requests should be logged. + Read *bool `xml:"Read"` + + // REQUIRED; the retention policy which determines how long the associated data should persist + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` + + // REQUIRED; The version of Storage Analytics to configure. + Version *string `xml:"Version"` + + // REQUIRED; Indicates whether all write requests should be logged. + Write *bool `xml:"Write"` +} + +// Metrics - a summary of request statistics grouped by API in hour or minute aggregates for blobs +type Metrics struct { + // REQUIRED; Indicates whether metrics are enabled for the Blob service. + Enabled *bool `xml:"Enabled"` + + // Indicates whether metrics should generate summary statistics for called API operations. + IncludeAPIs *bool `xml:"IncludeAPIs"` + + // the retention policy which determines how long the associated data should persist + RetentionPolicy *RetentionPolicy `xml:"RetentionPolicy"` + + // The version of Storage Analytics to configure. + Version *string `xml:"Version"` +} + +// ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +type ModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + IfMatch *string + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + IfModifiedSince *time.Time + // Specify an ETag value to operate only on blobs without a matching value. 
+ IfNoneMatch *string + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + IfTags *string + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. + IfUnmodifiedSince *time.Time +} + +// PageList - the list of pages +type PageList struct { + ClearRange []*ClearRange `xml:"ClearRange"` + NextMarker *string `xml:"NextMarker"` + PageRange []*PageRange `xml:"PageRange"` +} + +// MarshalXML implements the xml.Marshaller interface for type PageList. +func (p PageList) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias PageList + aux := &struct { + *alias + ClearRange *[]*ClearRange `xml:"ClearRange"` + PageRange *[]*PageRange `xml:"PageRange"` + }{ + alias: (*alias)(&p), + } + if p.ClearRange != nil { + aux.ClearRange = &p.ClearRange + } + if p.PageRange != nil { + aux.PageRange = &p.PageRange + } + return e.EncodeElement(aux, start) +} + +// PageRange struct +type PageRange struct { + // REQUIRED + End *int64 `xml:"End"` + + // REQUIRED + Start *int64 `xml:"Start"` +} + +// QueryFormat struct +type QueryFormat struct { + // REQUIRED; The quick query format type. + Type *QueryFormatType `xml:"Type"` + + // Groups the settings used for formatting the response if the response should be Arrow formatted. + ArrowConfiguration *ArrowConfiguration `xml:"ArrowConfiguration"` + + // Groups the settings used for interpreting the blob data if the blob is delimited text formatted. + DelimitedTextConfiguration *DelimitedTextConfiguration `xml:"DelimitedTextConfiguration"` + + // json text configuration + JSONTextConfiguration *JSONTextConfiguration `xml:"JsonTextConfiguration"` + + // Anything + ParquetTextConfiguration interface{} `xml:"ParquetTextConfiguration"` +} + +// QueryRequest - Groups the set of query request settings. +type QueryRequest struct { + // REQUIRED; The query expression in SQL. The maximum size of the query expression is 256KiB. + Expression *string `xml:"Expression"` + + // REQUIRED; Required. The type of the provided query expression. + QueryType *string `xml:"QueryType"` + InputSerialization *QuerySerialization `xml:"InputSerialization"` + OutputSerialization *QuerySerialization `xml:"OutputSerialization"` +} + +// MarshalXML implements the xml.Marshaller interface for type QueryRequest. +func (q QueryRequest) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + start.Name.Local = "QueryRequest" + type alias QueryRequest + aux := &struct { + *alias + }{ + alias: (*alias)(&q), + } + return e.EncodeElement(aux, start) +} + +//QuerySerialization struct +type QuerySerialization struct { + // REQUIRED + Format *QueryFormat `xml:"Format"` +} + +// RetentionPolicy - the retention policy which determines how long the associated data should persist +type RetentionPolicy struct { + // REQUIRED; Indicates whether a retention policy is enabled for the storage service + Enabled *bool `xml:"Enabled"` + + // Indicates whether permanent delete is allowed on this storage account. + AllowPermanentDelete *bool `xml:"AllowPermanentDelete"` + + // Indicates the number of days that metrics or logging or soft-deleted data should be retained. All data older than this + // value will be deleted + Days *int32 `xml:"Days"` +} + +// SequenceNumberAccessConditions contains a group of parameters for the pageBlobClient.UploadPages method. +type SequenceNumberAccessConditions struct { + // Specify this header value to operate only on a blob if it has the specified sequence number. 
+ IfSequenceNumberEqualTo *int64 + // Specify this header value to operate only on a blob if it has a sequence number less than the specified. + IfSequenceNumberLessThan *int64 + // Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified. + IfSequenceNumberLessThanOrEqualTo *int64 +} + +// SignedIdentifier - signed identifier +type SignedIdentifier struct { + // REQUIRED; An Access policy + AccessPolicy *AccessPolicy `xml:"AccessPolicy"` + + // REQUIRED; a unique id + ID *string `xml:"Id"` +} + +// SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL method. +type SourceModifiedAccessConditions struct { + // Specify an ETag value to operate only on blobs with a matching value. + SourceIfMatch *string + // Specify this header value to operate only on a blob if it has been modified since the specified date/time. + SourceIfModifiedSince *time.Time + // Specify an ETag value to operate only on blobs without a matching value. + SourceIfNoneMatch *string + // Specify a SQL where clause on blob tags to operate only on blobs with a matching value. + SourceIfTags *string + // Specify this header value to operate only on a blob if it has not been modified since the specified date/time. + SourceIfUnmodifiedSince *time.Time +} + +// StaticWebsite - The properties that enable an account to host a static website +type StaticWebsite struct { + // REQUIRED; Indicates whether this account is hosting a static website + Enabled *bool `xml:"Enabled"` + + // Absolute path of the default index page + DefaultIndexDocumentPath *string `xml:"DefaultIndexDocumentPath"` + + // The absolute path of the custom 404 page + ErrorDocument404Path *string `xml:"ErrorDocument404Path"` + + // The default name of the index page under each directory + IndexDocument *string `xml:"IndexDocument"` +} + +// StorageServiceProperties - Storage Service Properties. +type StorageServiceProperties struct { + // The set of CORS rules. + Cors []*CorsRule `xml:"Cors>CorsRule"` + + // The default version to use for requests to the Blob service if an incoming request's version is not specified. Possible + // values include version 2008-10-27 and all more recent versions + DefaultServiceVersion *string `xml:"DefaultServiceVersion"` + + // the retention policy which determines how long the associated data should persist + DeleteRetentionPolicy *RetentionPolicy `xml:"DeleteRetentionPolicy"` + + // a summary of request statistics grouped by API in hour or minute aggregates for blobs + HourMetrics *Metrics `xml:"HourMetrics"` + + // Azure Analytics Logging settings. + Logging *Logging `xml:"Logging"` + + // a summary of request statistics grouped by API in hour or minute aggregates for blobs + MinuteMetrics *Metrics `xml:"MinuteMetrics"` + + // The properties that enable an account to host a static website + StaticWebsite *StaticWebsite `xml:"StaticWebsite"` +} + +// MarshalXML implements the xml.Marshaller interface for type StorageServiceProperties. +func (s StorageServiceProperties) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias StorageServiceProperties + aux := &struct { + *alias + Cors *[]*CorsRule `xml:"Cors>CorsRule"` + }{ + alias: (*alias)(&s), + } + if s.Cors != nil { + aux.Cors = &s.Cors + } + return e.EncodeElement(aux, start) +} + +// StorageServiceStats - Stats for the storage service. 
+type StorageServiceStats struct { + // Geo-Replication information for the Secondary Storage Service + GeoReplication *GeoReplication `xml:"GeoReplication"` +} + +// UserDelegationKey - A user delegation key +type UserDelegationKey struct { + // REQUIRED; The date-time the key expires + SignedExpiry *time.Time `xml:"SignedExpiry"` + + // REQUIRED; The Azure Active Directory object ID in GUID format. + SignedOid *string `xml:"SignedOid"` + + // REQUIRED; Abbreviation of the Azure Storage service that accepts the key + SignedService *string `xml:"SignedService"` + + // REQUIRED; The date-time the key is active + SignedStart *time.Time `xml:"SignedStart"` + + // REQUIRED; The Azure Active Directory tenant ID in GUID format + SignedTid *string `xml:"SignedTid"` + + // REQUIRED; The service version that created the key + SignedVersion *string `xml:"SignedVersion"` + + // REQUIRED; The key as a base64 string + Value *string `xml:"Value"` +} + +// MarshalXML implements the xml.Marshaller interface for type UserDelegationKey. +func (u UserDelegationKey) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + type alias UserDelegationKey + aux := &struct { + *alias + SignedExpiry *timeRFC3339 `xml:"SignedExpiry"` + SignedStart *timeRFC3339 `xml:"SignedStart"` + }{ + alias: (*alias)(&u), + SignedExpiry: (*timeRFC3339)(u.SignedExpiry), + SignedStart: (*timeRFC3339)(u.SignedStart), + } + return e.EncodeElement(aux, start) +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type UserDelegationKey. +func (u *UserDelegationKey) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + type alias UserDelegationKey + aux := &struct { + *alias + SignedExpiry *timeRFC3339 `xml:"SignedExpiry"` + SignedStart *timeRFC3339 `xml:"SignedStart"` + }{ + alias: (*alias)(u), + } + if err := d.DecodeElement(aux, &start); err != nil { + return err + } + u.SignedExpiry = (*time.Time)(aux.SignedExpiry) + u.SignedStart = (*time.Time)(aux.SignedStart) + return nil +} + +// appendBlobClientAppendBlockFromURLOptions contains the optional parameters for the appendBlobClient.AppendBlockFromURL +// method. +type appendBlobClientAppendBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + // Bytes of source data in the specified range. + SourceRange *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// appendBlobClientAppendBlockOptions contains the optional parameters for the appendBlobClient.AppendBlock method. +type appendBlobClientAppendBlockOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. 
+ RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// appendBlobClientCreateOptions contains the optional parameters for the appendBlobClient.Create method. +type appendBlobClientCreateOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// appendBlobClientSealOptions contains the optional parameters for the appendBlobClient.Seal method. +type appendBlobClientSealOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blobClientAbortCopyFromURLOptions contains the optional parameters for the blobClient.AbortCopyFromURL method. +type blobClientAbortCopyFromURLOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blobClientAcquireLeaseOptions contains the optional parameters for the blobClient.AcquireLease method. 
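// Every options struct in this generated file (the appendBlobClient* and
// blobClient* types above and below) exposes its parameters as pointers so
// that "not set" (nil) can be told apart from a genuine zero value; the
// request builders only emit a header or query parameter when the field is
// non-nil. A minimal sketch of how such a struct is populated — ptr and
// downloadOptions below are hypothetical stand-ins (the vendored azcore
// module's to package ships a comparable Ptr helper), not the SDK's exported
// API:

package main

import "fmt"

// ptr returns a pointer to its argument, so optional fields can be set inline.
func ptr[T any](v T) *T { return &v }

// downloadOptions mimics the all-pointer shape of the generated option structs.
type downloadOptions struct {
	Range     *string
	Timeout   *int32
	RequestID *string
}

func main() {
	opts := downloadOptions{
		Range:   ptr("bytes=0-1048575"), // set only the parameters we need
		Timeout: ptr(int32(30)),
	}
	// RequestID stays nil, so a request builder would simply skip that header.
	fmt.Println(*opts.Range, *opts.Timeout, opts.RequestID == nil)
}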
+type blobClientAcquireLeaseOptions struct { + // Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease + // can be between 15 and 60 seconds. A lease duration cannot be changed using + // renew or change. + Duration *int32 + // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blobClientBreakLeaseOptions contains the optional parameters for the blobClient.BreakLease method. +type blobClientBreakLeaseOptions struct { + // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This + // break period is only used if it is shorter than the time remaining on the + // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has + // expired, but the lease may be held for longer than the break period. If this + // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, + // and an infinite lease breaks immediately. + BreakPeriod *int32 + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blobClientChangeLeaseOptions contains the optional parameters for the blobClient.ChangeLease method. +type blobClientChangeLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blobClientCopyFromURLOptions contains the optional parameters for the blobClient.CopyFromURL method. +type blobClientCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. 
Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blobClientCreateSnapshotOptions contains the optional parameters for the blobClient.CreateSnapshot method. +type blobClientCreateSnapshotOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the blobClient.DeleteImmutabilityPolicy +// method. +type blobClientDeleteImmutabilityPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blobClientDeleteOptions contains the optional parameters for the blobClient.Delete method. +type blobClientDeleteOptions struct { + // Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled. + BlobDeleteType *BlobDeleteType + // Required if the blob has associated snapshots. 
Specify one of the following two options: include: Delete the base blob + // and all of its snapshots. only: Delete only the blob's snapshots and not the blob + // itself + DeleteSnapshots *DeleteSnapshotsOptionType + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// blobClientDownloadOptions contains the optional parameters for the blobClient.Download method. +type blobClientDownloadOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + // When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentCRC64 *bool + // When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the + // range is less than or equal to 4 MB in size. + RangeGetContentMD5 *bool + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// blobClientGetAccountInfoOptions contains the optional parameters for the blobClient.GetAccountInfo method. +type blobClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// blobClientGetPropertiesOptions contains the optional parameters for the blobClient.GetProperties method. +type blobClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. 
For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// blobClientGetTagsOptions contains the optional parameters for the blobClient.GetTags method. +type blobClientGetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// blobClientQueryOptions contains the optional parameters for the blobClient.Query method. +type blobClientQueryOptions struct { + // the query request + QueryRequest *QueryRequest + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blobClientReleaseLeaseOptions contains the optional parameters for the blobClient.ReleaseLease method. +type blobClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blobClientRenewLeaseOptions contains the optional parameters for the blobClient.RenewLease method. 
+type blobClientRenewLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blobClientSetExpiryOptions contains the optional parameters for the blobClient.SetExpiry method. +type blobClientSetExpiryOptions struct { + // The time to set the blob to expiry + ExpiresOn *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blobClientSetHTTPHeadersOptions contains the optional parameters for the blobClient.SetHTTPHeaders method. +type blobClientSetHTTPHeadersOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blobClientSetImmutabilityPolicyOptions contains the optional parameters for the blobClient.SetImmutabilityPolicy method. +type blobClientSetImmutabilityPolicyOptions struct { + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blobClientSetLegalHoldOptions contains the optional parameters for the blobClient.SetLegalHold method. +type blobClientSetLegalHoldOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blobClientSetMetadataOptions contains the optional parameters for the blobClient.SetMetadata method. +type blobClientSetMetadataOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. 
If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blobClientSetTagsOptions contains the optional parameters for the blobClient.SetTags method. +type blobClientSetTagsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Blob tags + Tags *BlobTags + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// blobClientSetTierOptions contains the optional parameters for the blobClient.SetTier method. +type blobClientSetTierOptions struct { + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on. + // It's for service version 2019-10-10 and newer. + VersionID *string +} + +// blobClientStartCopyFromURLOptions contains the optional parameters for the blobClient.StartCopyFromURL method. +type blobClientStartCopyFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. 
+ BlobTagsString *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Optional: Indicates the priority with which to rehydrate an archived blob. + RehydratePriority *RehydratePriority + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer. + SealBlob *bool + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blobClientUndeleteOptions contains the optional parameters for the blobClient.Undelete method. +type blobClientUndeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blockBlobClientCommitBlockListOptions contains the optional parameters for the blockBlobClient.CommitBlockList method. +type blockBlobClientCommitBlockListOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. 
+ Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// blockBlobClientGetBlockListOptions contains the optional parameters for the blockBlobClient.GetBlockList method. +type blockBlobClientGetBlockListOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blockBlobClientPutBlobFromURLOptions contains the optional parameters for the blockBlobClient.PutBlobFromURL method. +type blockBlobClientPutBlobFromURLOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Optional, default is true. Indicates if properties from the source blob should be copied. + CopySourceBlobProperties *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// blockBlobClientStageBlockFromURLOptions contains the optional parameters for the blockBlobClient.StageBlockFromURL method. +type blockBlobClientStageBlockFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + // Bytes of source data in the specified range. + SourceRange *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// blockBlobClientStageBlockOptions contains the optional parameters for the blockBlobClient.StageBlock method. +type blockBlobClientStageBlockOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// blockBlobClientUploadOptions contains the optional parameters for the blockBlobClient.Upload method. +type blockBlobClientUploadOptions struct { + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. 
+ RequestID *string + // Optional. Indicates the tier to be set on the blob. + Tier *AccessTier + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional md5 for the body, to be validated by the service. + TransactionalContentMD5 []byte +} + +// containerClientAcquireLeaseOptions contains the optional parameters for the containerClient.AcquireLease method. +type containerClientAcquireLeaseOptions struct { + // Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite lease + // can be between 15 and 60 seconds. A lease duration cannot be changed using + // renew or change. + Duration *int32 + // Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is + // not in the correct format. See Guid Constructor (String) for a list of valid GUID + // string formats. + ProposedLeaseID *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// containerClientBreakLeaseOptions contains the optional parameters for the containerClient.BreakLease method. +type containerClientBreakLeaseOptions struct { + // For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This + // break period is only used if it is shorter than the time remaining on the + // lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has + // expired, but the lease may be held for longer than the break period. If this + // header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses, + // and an infinite lease breaks immediately. + BreakPeriod *int32 + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// containerClientChangeLeaseOptions contains the optional parameters for the containerClient.ChangeLease method. +type containerClientChangeLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// containerClientCreateOptions contains the optional parameters for the containerClient.Create method. 
+type containerClientCreateOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// containerClientDeleteOptions contains the optional parameters for the containerClient.Delete method. +type containerClientDeleteOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// containerClientGetAccessPolicyOptions contains the optional parameters for the containerClient.GetAccessPolicy method. +type containerClientGetAccessPolicyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// containerClientGetAccountInfoOptions contains the optional parameters for the containerClient.GetAccountInfo method. +type containerClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// containerClientGetPropertiesOptions contains the optional parameters for the containerClient.GetProperties method. +type containerClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// containerClientListBlobFlatSegmentOptions contains the optional parameters for the containerClient.ListBlobFlatSegment +// method. 
+type containerClientListBlobFlatSegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListBlobsIncludeItem + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// containerClientListBlobHierarchySegmentOptions contains the optional parameters for the containerClient.ListBlobHierarchySegment +// method. +type containerClientListBlobHierarchySegmentOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []ListBlobsIncludeItem + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// containerClientReleaseLeaseOptions contains the optional parameters for the containerClient.ReleaseLease method. +type containerClientReleaseLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// containerClientRenameOptions contains the optional parameters for the containerClient.Rename method. +type containerClientRenameOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match. + SourceLeaseID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// containerClientRenewLeaseOptions contains the optional parameters for the containerClient.RenewLease method. +type containerClientRenewLeaseOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// containerClientRestoreOptions contains the optional parameters for the containerClient.Restore method. +type containerClientRestoreOptions struct { + // Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore. + DeletedContainerName *string + // Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore. + DeletedContainerVersion *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// containerClientSetAccessPolicyOptions contains the optional parameters for the containerClient.SetAccessPolicy method. +type containerClientSetAccessPolicyOptions struct { + // Specifies whether data in the container may be accessed publicly and the level of access + Access *PublicAccessType + // the acls for the container + ContainerACL []*SignedIdentifier + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. 
+ RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// containerClientSetMetadataOptions contains the optional parameters for the containerClient.SetMetadata method. +type containerClientSetMetadataOptions struct { + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// containerClientSubmitBatchOptions contains the optional parameters for the containerClient.SubmitBatch method. +type containerClientSubmitBatchOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// pageBlobClientClearPagesOptions contains the optional parameters for the pageBlobClient.ClearPages method. +type pageBlobClientClearPagesOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// pageBlobClientCopyIncrementalOptions contains the optional parameters for the pageBlobClient.CopyIncremental method. +type pageBlobClientCopyIncrementalOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// pageBlobClientCreateOptions contains the optional parameters for the pageBlobClient.Create method. +type pageBlobClientCreateOptions struct { + // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of + // the sequence number must be between 0 and 2^63 - 1. + BlobSequenceNumber *int64 + // Optional. Used to set blob tags in various blob operations. + BlobTagsString *string + // Specifies the date time when the blobs immutability policy is set to expire. + ImmutabilityPolicyExpiry *time.Time + // Specifies the immutability policy mode to set on the blob. + ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + // Specified if a legal hold should be set on the blob. + LegalHold *bool + // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the + // operation will copy the metadata from the source blob or file to the destination + // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata + // is not copied from the source blob or file. Note that beginning with + // version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, + // Blobs, and Metadata for more information. + Metadata map[string]string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Optional. Indicates the tier to be set on the page blob. + Tier *PremiumPageBlobAccessTier + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// pageBlobClientGetPageRangesDiffOptions contains the optional parameters for the pageBlobClient.GetPageRangesDiff method. +type pageBlobClientGetPageRangesDiffOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + // Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot + // of the target blob. The response will only contain pages that were changed + // between the target blob and its previous snapshot. 
+ PrevSnapshotURL *string + // Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response + // will contain only pages that were changed between target blob and previous + // snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot + // specified by prevsnapshot is the older of the two. Note that incremental + // snapshots are currently supported only for blobs created on or after January 1, 2016. + Prevsnapshot *string + // Return only the bytes of the blob in the specified range. + Range *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// pageBlobClientGetPageRangesOptions contains the optional parameters for the pageBlobClient.GetPageRanges method. +type pageBlobClientGetPageRangesOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + // Return only the bytes of the blob in the specified range. + Range *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more + // information on working with blob snapshots, see Creating a Snapshot of a Blob. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob] + Snapshot *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// pageBlobClientResizeOptions contains the optional parameters for the pageBlobClient.Resize method. +type pageBlobClientResizeOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// pageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the pageBlobClient.UpdateSequenceNumber +// method. +type pageBlobClientUpdateSequenceNumberOptions struct { + // Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of + // the sequence number must be between 0 and 2^63 - 1. + BlobSequenceNumber *int64 + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// pageBlobClientUploadPagesFromURLOptions contains the optional parameters for the pageBlobClient.UploadPagesFromURL method. +type pageBlobClientUploadPagesFromURLOptions struct { + // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. + CopySourceAuthorization *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // Specify the md5 calculated for the range of bytes that must be read from the copy source. + SourceContentMD5 []byte + // Specify the crc64 calculated for the range of bytes that must be read from the copy source. + SourceContentcrc64 []byte + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// pageBlobClientUploadPagesOptions contains the optional parameters for the pageBlobClient.UploadPages method. +type pageBlobClientUploadPagesOptions struct { + // Return only the bytes of the blob in the specified range. + Range *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte + // Specify the transactional md5 for the body, to be validated by the service. 
+ TransactionalContentMD5 []byte +} + +// serviceClientFilterBlobsOptions contains the optional parameters for the serviceClient.FilterBlobs method. +type serviceClientFilterBlobsOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 + // Filters the results to return only to return only blobs whose tags match the specified expression. + Where *string +} + +// serviceClientGetAccountInfoOptions contains the optional parameters for the serviceClient.GetAccountInfo method. +type serviceClientGetAccountInfoOptions struct { + // placeholder for future optional parameters +} + +// serviceClientGetPropertiesOptions contains the optional parameters for the serviceClient.GetProperties method. +type serviceClientGetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// serviceClientGetStatisticsOptions contains the optional parameters for the serviceClient.GetStatistics method. +type serviceClientGetStatisticsOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// serviceClientGetUserDelegationKeyOptions contains the optional parameters for the serviceClient.GetUserDelegationKey method. 
+type serviceClientGetUserDelegationKeyOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// serviceClientListContainersSegmentOptions contains the optional parameters for the serviceClient.ListContainersSegment +// method. +type serviceClientListContainersSegmentOptions struct { + // Include this parameter to specify that the container's metadata be returned as part of the response body. + Include []ListContainersIncludeType + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + // Filters the results to return only containers whose name begins with the specified prefix. + Prefix *string + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// serviceClientSetPropertiesOptions contains the optional parameters for the serviceClient.SetProperties method. +type serviceClientSetPropertiesOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. + // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + +// serviceClientSubmitBatchOptions contains the optional parameters for the serviceClient.SubmitBatch method. +type serviceClientSubmitBatchOptions struct { + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pageblob_client.go new file mode 100644 index 000000000..bad81201b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pageblob_client.go @@ -0,0 +1,1247 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +import ( + "context" + "encoding/base64" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "time" +) + +type pageBlobClient struct { + endpoint string + pl runtime.Pipeline +} + +// newPageBlobClient creates a new instance of pageBlobClient with the specified values. +// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// pl - the pipeline used for sending requests and handling responses. +func newPageBlobClient(endpoint string, pl runtime.Pipeline) *pageBlobClient { + client := &pageBlobClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// ClearPages - The Clear Pages operation clears a set of pages from a page blob +// If the operation fails it returns an *azcore.ResponseError type. +// contentLength - The length of the request. +// pageBlobClientClearPagesOptions - pageBlobClientClearPagesOptions contains the optional parameters for the pageBlobClient.ClearPages +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the pageBlobClient.UploadPages +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. 
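Each operation on this generated client takes its optional parameters as a dedicated options struct plus the grouped access-condition types listed above, and every group may be nil. The following is a minimal sketch of driving the ClearPages method defined next; the helper name is illustrative, and real callers reach this unexported client through the exported clients in zc_page_blob_client.go:

    package azblob

    import "context"

    // clearFirstPage is an illustrative helper: it clears the first 512-byte page,
    // leaving every optional parameter group nil except the range.
    func clearFirstPage(ctx context.Context, client *pageBlobClient) error {
        rng := "bytes=0-511"
        opts := &pageBlobClientClearPagesOptions{Range: &rng}
        // Content-Length is 0 because a clear-pages request carries no body.
        _, err := client.ClearPages(ctx, 0, opts, nil, nil, nil, nil, nil)
        return err
    }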
+func (client *pageBlobClient) ClearPages(ctx context.Context, contentLength int64, pageBlobClientClearPagesOptions *pageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientClearPagesResponse, error) { + req, err := client.clearPagesCreateRequest(ctx, contentLength, pageBlobClientClearPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) + if err != nil { + return pageBlobClientClearPagesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return pageBlobClientClearPagesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return pageBlobClientClearPagesResponse{}, runtime.NewResponseError(resp) + } + return client.clearPagesHandleResponse(resp) +} + +// clearPagesCreateRequest creates the ClearPages request. +func (client *pageBlobClient) clearPagesCreateRequest(ctx context.Context, contentLength int64, pageBlobClientClearPagesOptions *pageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "page") + if pageBlobClientClearPagesOptions != nil && pageBlobClientClearPagesOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientClearPagesOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-page-write", "clear") + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if pageBlobClientClearPagesOptions != nil && pageBlobClientClearPagesOptions.Range != nil { + req.Raw().Header.Set("x-ms-range", *pageBlobClientClearPagesOptions.Range) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { + req.Raw().Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)) + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { + req.Raw().Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)) + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header.Set("x-ms-if-sequence-number-eq", 
strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if pageBlobClientClearPagesOptions != nil && pageBlobClientClearPagesOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientClearPagesOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// clearPagesHandleResponse handles the ClearPages response. +func (client *pageBlobClient) clearPagesHandleResponse(resp *http.Response) (pageBlobClientClearPagesResponse, error) { + result := pageBlobClientClearPagesResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientClearPagesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return pageBlobClientClearPagesResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return pageBlobClientClearPagesResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return pageBlobClientClearPagesResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientClearPagesResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// CopyIncremental - The Copy Incremental operation copies a snapshot of the source page blob to a destination page blob. +// The snapshot is copied such that only the differential changes between the previously copied +// snapshot are transferred to the destination. The copied snapshots are complete copies of the original snapshot and can +// be read or copied from as usual. This API is supported since REST version +// 2016-05-31. 
+// If the operation fails it returns an *azcore.ResponseError type. +// copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies +// a page blob snapshot. The value should be URL-encoded as it would appear in a request +// URI. The source blob must either be public or must be authenticated via a shared access signature. +// pageBlobClientCopyIncrementalOptions - pageBlobClientCopyIncrementalOptions contains the optional parameters for the pageBlobClient.CopyIncremental +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *pageBlobClient) CopyIncremental(ctx context.Context, copySource string, pageBlobClientCopyIncrementalOptions *pageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientCopyIncrementalResponse, error) { + req, err := client.copyIncrementalCreateRequest(ctx, copySource, pageBlobClientCopyIncrementalOptions, modifiedAccessConditions) + if err != nil { + return pageBlobClientCopyIncrementalResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return pageBlobClientCopyIncrementalResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return pageBlobClientCopyIncrementalResponse{}, runtime.NewResponseError(resp) + } + return client.copyIncrementalHandleResponse(resp) +} + +// copyIncrementalCreateRequest creates the CopyIncremental request. +func (client *pageBlobClient) copyIncrementalCreateRequest(ctx context.Context, copySource string, pageBlobClientCopyIncrementalOptions *pageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "incrementalcopy") + if pageBlobClientCopyIncrementalOptions != nil && pageBlobClientCopyIncrementalOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientCopyIncrementalOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-copy-source", copySource) + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if pageBlobClientCopyIncrementalOptions != nil && pageBlobClientCopyIncrementalOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientCopyIncrementalOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// copyIncrementalHandleResponse handles the 
CopyIncremental response. +func (client *pageBlobClient) copyIncrementalHandleResponse(resp *http.Response) (pageBlobClientCopyIncrementalResponse, error) { + result := pageBlobClientCopyIncrementalResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientCopyIncrementalResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientCopyIncrementalResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-copy-id"); val != "" { + result.CopyID = &val + } + if val := resp.Header.Get("x-ms-copy-status"); val != "" { + result.CopyStatus = (*CopyStatusType)(&val) + } + return result, nil +} + +// Create - The Create operation creates a new page blob. +// If the operation fails it returns an *azcore.ResponseError type. +// contentLength - The length of the request. +// blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned +// to a 512-byte boundary. +// pageBlobClientCreateOptions - pageBlobClientCreateOptions contains the optional parameters for the pageBlobClient.Create +// method. +// BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the blobClient.SetHTTPHeaders method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *pageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, pageBlobClientCreateOptions *pageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientCreateResponse, error) { + req, err := client.createCreateRequest(ctx, contentLength, blobContentLength, pageBlobClientCreateOptions, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return pageBlobClientCreateResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return pageBlobClientCreateResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return pageBlobClientCreateResponse{}, runtime.NewResponseError(resp) + } + return client.createHandleResponse(resp) +} + +// createCreateRequest creates the Create request. 
+func (client *pageBlobClient) createCreateRequest(ctx context.Context, contentLength int64, blobContentLength int64, pageBlobClientCreateOptions *pageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientCreateOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-blob-type", "PageBlob") + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.Tier != nil { + req.Raw().Header.Set("x-ms-access-tier", string(*pageBlobClientCreateOptions.Tier)) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil { + req.Raw().Header.Set("x-ms-blob-content-type", *blobHTTPHeaders.BlobContentType) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil { + req.Raw().Header.Set("x-ms-blob-content-encoding", *blobHTTPHeaders.BlobContentEncoding) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil { + req.Raw().Header.Set("x-ms-blob-content-language", *blobHTTPHeaders.BlobContentLanguage) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil { + req.Raw().Header.Set("x-ms-blob-content-md5", base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil { + req.Raw().Header.Set("x-ms-blob-cache-control", *blobHTTPHeaders.BlobCacheControl) + } + if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.Metadata != nil { + for k, v := range pageBlobClientCreateOptions.Metadata { + req.Raw().Header.Set("x-ms-meta-"+k, v) + } + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { + req.Raw().Header.Set("x-ms-blob-content-disposition", *blobHTTPHeaders.BlobContentDisposition) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", 
*modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10)) + if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.BlobSequenceNumber != nil { + req.Raw().Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*pageBlobClientCreateOptions.BlobSequenceNumber, 10)) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientCreateOptions.RequestID) + } + if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.BlobTagsString != nil { + req.Raw().Header.Set("x-ms-tags", *pageBlobClientCreateOptions.BlobTagsString) + } + if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.ImmutabilityPolicyExpiry != nil { + req.Raw().Header.Set("x-ms-immutability-policy-until-date", pageBlobClientCreateOptions.ImmutabilityPolicyExpiry.Format(time.RFC1123)) + } + if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.ImmutabilityPolicyMode != nil { + req.Raw().Header.Set("x-ms-immutability-policy-mode", string(*pageBlobClientCreateOptions.ImmutabilityPolicyMode)) + } + if pageBlobClientCreateOptions != nil && pageBlobClientCreateOptions.LegalHold != nil { + req.Raw().Header.Set("x-ms-legal-hold", strconv.FormatBool(*pageBlobClientCreateOptions.LegalHold)) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// createHandleResponse handles the Create response. 
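For the Create call above, contentLength describes the empty request body while blobContentLength fixes the maximum size of the page blob (up to 1 TB) and must be aligned to a 512-byte boundary. A hedged sketch with an illustrative helper name:

    package azblob

    import "context"

    // createEmptyPageBlob is an illustrative helper: it creates an 8 MiB page blob
    // with an initial sequence number of 0 and no extra headers, metadata or access conditions.
    func createEmptyPageBlob(ctx context.Context, client *pageBlobClient) error {
        const blobSize = 8 * 1024 * 1024 // must be a multiple of 512 bytes
        seq := int64(0)
        opts := &pageBlobClientCreateOptions{BlobSequenceNumber: &seq}
        _, err := client.Create(ctx, 0, blobSize, opts, nil, nil, nil, nil, nil)
        return err
    }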
+func (client *pageBlobClient) createHandleResponse(resp *http.Response) (pageBlobClientCreateResponse, error) { + result := pageBlobClientCreateResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientCreateResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return pageBlobClientCreateResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("x-ms-version-id"); val != "" { + result.VersionID = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientCreateResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return pageBlobClientCreateResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// GetPageRanges - The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot of a page +// blob +// If the operation fails it returns an *azcore.ResponseError type. +// pageBlobClientGetPageRangesOptions - pageBlobClientGetPageRangesOptions contains the optional parameters for the pageBlobClient.GetPageRanges +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *pageBlobClient) GetPageRanges(pageBlobClientGetPageRangesOptions *pageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *pageBlobClientGetPageRangesPager { + return &pageBlobClientGetPageRangesPager{ + client: client, + requester: func(ctx context.Context) (*policy.Request, error) { + return client.getPageRangesCreateRequest(ctx, pageBlobClientGetPageRangesOptions, leaseAccessConditions, modifiedAccessConditions) + }, + advancer: func(ctx context.Context, resp pageBlobClientGetPageRangesResponse) (*policy.Request, error) { + return runtime.NewRequest(ctx, http.MethodGet, *resp.PageList.NextMarker) + }, + } +} + +// getPageRangesCreateRequest creates the GetPageRanges request. 
+func (client *pageBlobClient) getPageRangesCreateRequest(ctx context.Context, pageBlobClientGetPageRangesOptions *pageBlobClientGetPageRangesOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "pagelist") + if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Snapshot != nil { + reqQP.Set("snapshot", *pageBlobClientGetPageRangesOptions.Snapshot) + } + if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientGetPageRangesOptions.Timeout), 10)) + } + if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Marker != nil { + reqQP.Set("marker", *pageBlobClientGetPageRangesOptions.Marker) + } + if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*pageBlobClientGetPageRangesOptions.Maxresults), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.Range != nil { + req.Raw().Header.Set("x-ms-range", *pageBlobClientGetPageRangesOptions.Range) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if pageBlobClientGetPageRangesOptions != nil && pageBlobClientGetPageRangesOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientGetPageRangesOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getPageRangesHandleResponse handles the GetPageRanges response. 
+func (client *pageBlobClient) getPageRangesHandleResponse(resp *http.Response) (pageBlobClientGetPageRangesResponse, error) { + result := pageBlobClientGetPageRangesResponse{RawResponse: resp} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientGetPageRangesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { + blobContentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return pageBlobClientGetPageRangesResponse{}, err + } + result.BlobContentLength = &blobContentLength + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientGetPageRangesResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { + return pageBlobClientGetPageRangesResponse{}, err + } + return result, nil +} + +// GetPageRangesDiff - The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that were +// changed between target blob and previous snapshot. +// If the operation fails it returns an *azcore.ResponseError type. +// pageBlobClientGetPageRangesDiffOptions - pageBlobClientGetPageRangesDiffOptions contains the optional parameters for the +// pageBlobClient.GetPageRangesDiff method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *pageBlobClient) GetPageRangesDiff(pageBlobClientGetPageRangesDiffOptions *pageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) *pageBlobClientGetPageRangesDiffPager { + return &pageBlobClientGetPageRangesDiffPager{ + client: client, + requester: func(ctx context.Context) (*policy.Request, error) { + return client.getPageRangesDiffCreateRequest(ctx, pageBlobClientGetPageRangesDiffOptions, leaseAccessConditions, modifiedAccessConditions) + }, + advancer: func(ctx context.Context, resp pageBlobClientGetPageRangesDiffResponse) (*policy.Request, error) { + return runtime.NewRequest(ctx, http.MethodGet, *resp.PageList.NextMarker) + }, + } +} + +// getPageRangesDiffCreateRequest creates the GetPageRangesDiff request. 
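GetPageRanges and GetPageRangesDiff return pagers instead of a single response; their advancers re-issue the request against PageList.NextMarker until the listing is exhausted. The sketch below assumes the NextPage/PageResponse/Err shape of the pagers generated in zz_generated_pagers.go (not shown here):

    package azblob

    import "context"

    // listPageRanges is an illustrative helper: it drains the page-range pager.
    func listPageRanges(ctx context.Context, client *pageBlobClient) error {
        pager := client.GetPageRanges(nil, nil, nil)
        for pager.NextPage(ctx) {
            resp := pager.PageResponse()
            _ = resp.PageList // the valid ranges for this page; NextMarker feeds the advancer above
        }
        return pager.Err()
    }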
+func (client *pageBlobClient) getPageRangesDiffCreateRequest(ctx context.Context, pageBlobClientGetPageRangesDiffOptions *pageBlobClientGetPageRangesDiffOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "pagelist") + if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Snapshot != nil { + reqQP.Set("snapshot", *pageBlobClientGetPageRangesDiffOptions.Snapshot) + } + if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientGetPageRangesDiffOptions.Timeout), 10)) + } + if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Prevsnapshot != nil { + reqQP.Set("prevsnapshot", *pageBlobClientGetPageRangesDiffOptions.Prevsnapshot) + } + if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Marker != nil { + reqQP.Set("marker", *pageBlobClientGetPageRangesDiffOptions.Marker) + } + if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*pageBlobClientGetPageRangesDiffOptions.Maxresults), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.PrevSnapshotURL != nil { + req.Raw().Header.Set("x-ms-previous-snapshot-url", *pageBlobClientGetPageRangesDiffOptions.PrevSnapshotURL) + } + if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.Range != nil { + req.Raw().Header.Set("x-ms-range", *pageBlobClientGetPageRangesDiffOptions.Range) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if pageBlobClientGetPageRangesDiffOptions != nil && pageBlobClientGetPageRangesDiffOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientGetPageRangesDiffOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getPageRangesDiffHandleResponse handles the GetPageRangesDiff response. 
+func (client *pageBlobClient) getPageRangesDiffHandleResponse(resp *http.Response) (pageBlobClientGetPageRangesDiffResponse, error) { + result := pageBlobClientGetPageRangesDiffResponse{RawResponse: resp} + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientGetPageRangesDiffResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("x-ms-blob-content-length"); val != "" { + blobContentLength, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return pageBlobClientGetPageRangesDiffResponse{}, err + } + result.BlobContentLength = &blobContentLength + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientGetPageRangesDiffResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil { + return pageBlobClientGetPageRangesDiffResponse{}, err + } + return result, nil +} + +// Resize - Resize the Blob +// If the operation fails it returns an *azcore.ResponseError type. +// blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned +// to a 512-byte boundary. +// pageBlobClientResizeOptions - pageBlobClientResizeOptions contains the optional parameters for the pageBlobClient.Resize +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *pageBlobClient) Resize(ctx context.Context, blobContentLength int64, pageBlobClientResizeOptions *pageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientResizeResponse, error) { + req, err := client.resizeCreateRequest(ctx, blobContentLength, pageBlobClientResizeOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions) + if err != nil { + return pageBlobClientResizeResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return pageBlobClientResizeResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return pageBlobClientResizeResponse{}, runtime.NewResponseError(resp) + } + return client.resizeHandleResponse(resp) +} + +// resizeCreateRequest creates the Resize request. 
+func (client *pageBlobClient) resizeCreateRequest(ctx context.Context, blobContentLength int64, pageBlobClientResizeOptions *pageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "properties") + if pageBlobClientResizeOptions != nil && pageBlobClientResizeOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientResizeOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-blob-content-length", strconv.FormatInt(blobContentLength, 10)) + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if pageBlobClientResizeOptions != nil && pageBlobClientResizeOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientResizeOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// resizeHandleResponse handles the Resize response. 
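Resize above only rewrites the x-ms-blob-content-length property, and the new length must again be 512-byte aligned. A minimal sketch, helper name illustrative:

    package azblob

    import "context"

    // growPageBlob is an illustrative helper: it resizes the blob to 16 MiB.
    func growPageBlob(ctx context.Context, client *pageBlobClient) error {
        const newSize = 16 * 1024 * 1024 // 512-byte aligned, as the service requires
        _, err := client.Resize(ctx, newSize, nil, nil, nil, nil, nil)
        return err
    }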
+func (client *pageBlobClient) resizeHandleResponse(resp *http.Response) (pageBlobClientResizeResponse, error) { + result := pageBlobClientResizeResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientResizeResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return pageBlobClientResizeResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientResizeResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// UpdateSequenceNumber - Update the sequence number of the blob +// If the operation fails it returns an *azcore.ResponseError type. +// sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. This property applies to +// page blobs only. This property indicates how the service should modify the blob's sequence number +// pageBlobClientUpdateSequenceNumberOptions - pageBlobClientUpdateSequenceNumberOptions contains the optional parameters +// for the pageBlobClient.UpdateSequenceNumber method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *pageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, pageBlobClientUpdateSequenceNumberOptions *pageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientUpdateSequenceNumberResponse, error) { + req, err := client.updateSequenceNumberCreateRequest(ctx, sequenceNumberAction, pageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions, modifiedAccessConditions) + if err != nil { + return pageBlobClientUpdateSequenceNumberResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return pageBlobClientUpdateSequenceNumberResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return pageBlobClientUpdateSequenceNumberResponse{}, runtime.NewResponseError(resp) + } + return client.updateSequenceNumberHandleResponse(resp) +} + +// updateSequenceNumberCreateRequest creates the UpdateSequenceNumber request. 
+func (client *pageBlobClient) updateSequenceNumberCreateRequest(ctx context.Context, sequenceNumberAction SequenceNumberActionType, pageBlobClientUpdateSequenceNumberOptions *pageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "properties") + if pageBlobClientUpdateSequenceNumberOptions != nil && pageBlobClientUpdateSequenceNumberOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientUpdateSequenceNumberOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-sequence-number-action", string(sequenceNumberAction)) + if pageBlobClientUpdateSequenceNumberOptions != nil && pageBlobClientUpdateSequenceNumberOptions.BlobSequenceNumber != nil { + req.Raw().Header.Set("x-ms-blob-sequence-number", strconv.FormatInt(*pageBlobClientUpdateSequenceNumberOptions.BlobSequenceNumber, 10)) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if pageBlobClientUpdateSequenceNumberOptions != nil && pageBlobClientUpdateSequenceNumberOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientUpdateSequenceNumberOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// updateSequenceNumberHandleResponse handles the UpdateSequenceNumber response. 
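UpdateSequenceNumber maps the action onto the x-ms-sequence-number-action header and sends x-ms-blob-sequence-number whenever the options struct carries one. The named SequenceNumberActionType constants live in zz_generated_constants.go (not shown here), so this sketch assumes the type is string-backed and builds the value from the raw wire string:

    package azblob

    import "context"

    // setSequenceNumber is an illustrative helper: it sets the blob's sequence number to 7.
    func setSequenceNumber(ctx context.Context, client *pageBlobClient) error {
        seq := int64(7)
        opts := &pageBlobClientUpdateSequenceNumberOptions{BlobSequenceNumber: &seq}
        _, err := client.UpdateSequenceNumber(ctx, SequenceNumberActionType("update"), opts, nil, nil)
        return err
    }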
+func (client *pageBlobClient) updateSequenceNumberHandleResponse(resp *http.Response) (pageBlobClientUpdateSequenceNumberResponse, error) { + result := pageBlobClientUpdateSequenceNumberResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientUpdateSequenceNumberResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return pageBlobClientUpdateSequenceNumberResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientUpdateSequenceNumberResponse{}, err + } + result.Date = &date + } + return result, nil +} + +// UploadPages - The Upload Pages operation writes a range of pages to a page blob +// If the operation fails it returns an *azcore.ResponseError type. +// contentLength - The length of the request. +// body - Initial data +// pageBlobClientUploadPagesOptions - pageBlobClientUploadPagesOptions contains the optional parameters for the pageBlobClient.UploadPages +// method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the pageBlobClient.UploadPages +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +func (client *pageBlobClient) UploadPages(ctx context.Context, contentLength int64, body io.ReadSeekCloser, pageBlobClientUploadPagesOptions *pageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (pageBlobClientUploadPagesResponse, error) { + req, err := client.uploadPagesCreateRequest(ctx, contentLength, body, pageBlobClientUploadPagesOptions, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions) + if err != nil { + return pageBlobClientUploadPagesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return pageBlobClientUploadPagesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return pageBlobClientUploadPagesResponse{}, runtime.NewResponseError(resp) + } + return client.uploadPagesHandleResponse(resp) +} + +// uploadPagesCreateRequest creates the UploadPages request. 
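UploadPages takes its payload as an io.ReadSeekCloser plus an explicit contentLength, presumably so the retry policy can rewind the body. A bytes.Reader provides Read and Seek but not Close; the azcore streaming package's NopCloser is the usual way to bridge that gap. The helper below is a minimal sketch under that assumption; pageBody is a made-up name and only streaming.NopCloser is taken from the SDK.

    package main

    import (
        "bytes"
        "fmt"
        "io"

        "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
    )

    // pageBody wraps a page-aligned buffer so it can be passed as the
    // io.ReadSeekCloser body expected by a page-blob upload, together with
    // the Content-Length the service requires.
    func pageBody(pages []byte) (io.ReadSeekCloser, int64, error) {
        const pageSize = 512
        if len(pages) == 0 || len(pages)%pageSize != 0 {
            return nil, 0, fmt.Errorf("payload must be a non-empty multiple of %d bytes, got %d", pageSize, len(pages))
        }
        // bytes.Reader supplies Read and Seek; NopCloser adds the no-op Close
        // that the SDK's body interface asks for.
        return streaming.NopCloser(bytes.NewReader(pages)), int64(len(pages)), nil
    }

    func main() {
        body, length, err := pageBody(make([]byte, 1024))
        if err != nil {
            fmt.Println(err)
            return
        }
        defer body.Close()
        fmt.Println("content length:", length)
    }
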
+func (client *pageBlobClient) uploadPagesCreateRequest(ctx context.Context, contentLength int64, body io.ReadSeekCloser, pageBlobClientUploadPagesOptions *pageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "page") + if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientUploadPagesOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-page-write", "update") + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.TransactionalContentMD5 != nil { + req.Raw().Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(pageBlobClientUploadPagesOptions.TransactionalContentMD5)) + } + if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.TransactionalContentCRC64 != nil { + req.Raw().Header.Set("x-ms-content-crc64", base64.StdEncoding.EncodeToString(pageBlobClientUploadPagesOptions.TransactionalContentCRC64)) + } + if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.Range != nil { + req.Raw().Header.Set("x-ms-range", *pageBlobClientUploadPagesOptions.Range) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { + req.Raw().Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)) + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { + req.Raw().Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)) + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + 
} + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if pageBlobClientUploadPagesOptions != nil && pageBlobClientUploadPagesOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientUploadPagesOptions.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, req.SetBody(body, "application/octet-stream") +} + +// uploadPagesHandleResponse handles the UploadPages response. +func (client *pageBlobClient) uploadPagesHandleResponse(resp *http.Response) (pageBlobClientUploadPagesResponse, error) { + result := pageBlobClientUploadPagesResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientUploadPagesResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return pageBlobClientUploadPagesResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return pageBlobClientUploadPagesResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return pageBlobClientUploadPagesResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientUploadPagesResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return pageBlobClientUploadPagesResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} + +// UploadPagesFromURL - The Upload Pages operation writes a range of pages to a page blob where the contents are read from +// a URL +// If the operation fails it returns an *azcore.ResponseError type. +// sourceURL - Specify a URL to the copy source. +// sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header +// and x-ms-range/Range destination range header. 
+// contentLength - The length of the request. +// rangeParam - The range of bytes to which the source range would be written. The range should be 512 aligned and range-end +// is required. +// pageBlobClientUploadPagesFromURLOptions - pageBlobClientUploadPagesFromURLOptions contains the optional parameters for +// the pageBlobClient.UploadPagesFromURL method. +// CpkInfo - CpkInfo contains a group of parameters for the blobClient.Download method. +// CpkScopeInfo - CpkScopeInfo contains a group of parameters for the blobClient.SetMetadata method. +// LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the containerClient.GetProperties method. +// SequenceNumberAccessConditions - SequenceNumberAccessConditions contains a group of parameters for the pageBlobClient.UploadPages +// method. +// ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the containerClient.Delete method. +// SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the blobClient.StartCopyFromURL +// method. +func (client *pageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, pageBlobClientUploadPagesFromURLOptions *pageBlobClientUploadPagesFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (pageBlobClientUploadPagesFromURLResponse, error) { + req, err := client.uploadPagesFromURLCreateRequest(ctx, sourceURL, sourceRange, contentLength, rangeParam, pageBlobClientUploadPagesFromURLOptions, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions) + if err != nil { + return pageBlobClientUploadPagesFromURLResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return pageBlobClientUploadPagesFromURLResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusCreated) { + return pageBlobClientUploadPagesFromURLResponse{}, runtime.NewResponseError(resp) + } + return client.uploadPagesFromURLHandleResponse(resp) +} + +// uploadPagesFromURLCreateRequest creates the UploadPagesFromURL request. 
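As the comment above notes, the source range, the destination range and Content-Length must all describe the same number of bytes, and page ranges must be 512-byte aligned with an explicit range end. The short sketch below builds the bytes=start-end values carried by the x-ms-range and x-ms-source-range headers; pageRange is an illustrative helper, not part of the SDK.

    package main

    import "fmt"

    const pageSize = 512

    // pageRange renders an inclusive byte range in the bytes=start-end form
    // used by the x-ms-range and x-ms-source-range headers, enforcing the
    // 512-byte alignment the service requires for page operations.
    func pageRange(offset, length int64) (string, error) {
        if length <= 0 || offset%pageSize != 0 || length%pageSize != 0 {
            return "", fmt.Errorf("offset %d / length %d are not %d-byte aligned", offset, length, pageSize)
        }
        return fmt.Sprintf("bytes=%d-%d", offset, offset+length-1), nil
    }

    func main() {
        // Copy one 1 MiB chunk: the source range, the destination range and
        // Content-Length must all describe the same 1 MiB span.
        const chunk = 1 << 20
        src, _ := pageRange(0, chunk)       // bytes=0-1048575 read from the source URL
        dst, _ := pageRange(4*chunk, chunk) // bytes=4194304-5242879 written in the destination blob
        fmt.Println(src, dst, "content length:", int64(chunk))
    }
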
+func (client *pageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, pageBlobClientUploadPagesFromURLOptions *pageBlobClientUploadPagesFromURLOptions, cpkInfo *CpkInfo, cpkScopeInfo *CpkScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "page") + if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*pageBlobClientUploadPagesFromURLOptions.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-page-write", "update") + req.Raw().Header.Set("x-ms-copy-source", sourceURL) + req.Raw().Header.Set("x-ms-source-range", sourceRange) + if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.SourceContentMD5 != nil { + req.Raw().Header.Set("x-ms-source-content-md5", base64.StdEncoding.EncodeToString(pageBlobClientUploadPagesFromURLOptions.SourceContentMD5)) + } + if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.SourceContentcrc64 != nil { + req.Raw().Header.Set("x-ms-source-content-crc64", base64.StdEncoding.EncodeToString(pageBlobClientUploadPagesFromURLOptions.SourceContentcrc64)) + } + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + req.Raw().Header.Set("x-ms-range", rangeParam) + if cpkInfo != nil && cpkInfo.EncryptionKey != nil { + req.Raw().Header.Set("x-ms-encryption-key", *cpkInfo.EncryptionKey) + } + if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil { + req.Raw().Header.Set("x-ms-encryption-key-sha256", *cpkInfo.EncryptionKeySHA256) + } + if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil { + req.Raw().Header.Set("x-ms-encryption-algorithm", string(*cpkInfo.EncryptionAlgorithm)) + } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header.Set("x-ms-encryption-scope", *cpkScopeInfo.EncryptionScope) + } + if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { + req.Raw().Header.Set("x-ms-lease-id", *leaseAccessConditions.LeaseID) + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil { + req.Raw().Header.Set("x-ms-if-sequence-number-le", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)) + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil { + req.Raw().Header.Set("x-ms-if-sequence-number-lt", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)) + } + if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil { + req.Raw().Header.Set("x-ms-if-sequence-number-eq", strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { + req.Raw().Header.Set("If-Modified-Since", modifiedAccessConditions.IfModifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && 
modifiedAccessConditions.IfUnmodifiedSince != nil { + req.Raw().Header.Set("If-Unmodified-Since", modifiedAccessConditions.IfUnmodifiedSince.Format(time.RFC1123)) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil { + req.Raw().Header.Set("If-Match", *modifiedAccessConditions.IfMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil { + req.Raw().Header.Set("If-None-Match", *modifiedAccessConditions.IfNoneMatch) + } + if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { + req.Raw().Header.Set("x-ms-if-tags", *modifiedAccessConditions.IfTags) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil { + req.Raw().Header.Set("x-ms-source-if-modified-since", sourceModifiedAccessConditions.SourceIfModifiedSince.Format(time.RFC1123)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil { + req.Raw().Header.Set("x-ms-source-if-unmodified-since", sourceModifiedAccessConditions.SourceIfUnmodifiedSince.Format(time.RFC1123)) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil { + req.Raw().Header.Set("x-ms-source-if-match", *sourceModifiedAccessConditions.SourceIfMatch) + } + if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { + req.Raw().Header.Set("x-ms-source-if-none-match", *sourceModifiedAccessConditions.SourceIfNoneMatch) + } + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *pageBlobClientUploadPagesFromURLOptions.RequestID) + } + if pageBlobClientUploadPagesFromURLOptions != nil && pageBlobClientUploadPagesFromURLOptions.CopySourceAuthorization != nil { + req.Raw().Header.Set("x-ms-copy-source-authorization", *pageBlobClientUploadPagesFromURLOptions.CopySourceAuthorization) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// uploadPagesFromURLHandleResponse handles the UploadPagesFromURL response. 
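The three condition groups threaded through these request builders map one-to-one onto HTTP headers: ModifiedAccessConditions to If-Match, If-None-Match, If-Modified-Since, If-Unmodified-Since and x-ms-if-tags; SequenceNumberAccessConditions to the x-ms-if-sequence-number-* headers; LeaseAccessConditions to x-ms-lease-id. The sketch below populates them on the caller's side using the field names visible above; the ETag, lease ID and sequence-number values are hypothetical.

    package main

    import (
        "fmt"
        "time"

        "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
    )

    func main() {
        etag := "\"0x8DA1E5E1B2C3D4E\"" // hypothetical ETag captured from an earlier response
        notAfter := time.Now().Add(-time.Hour)
        maxSeq := int64(41)
        lease := "hypothetical-lease-id"

        // Only write if the blob still carries the ETag we last saw and has not
        // been modified in the last hour -> If-Match / If-Unmodified-Since.
        mod := azblob.ModifiedAccessConditions{
            IfMatch:           &etag,
            IfUnmodifiedSince: &notAfter,
        }
        // Only write if the current sequence number is <= 41 -> x-ms-if-sequence-number-le.
        seq := azblob.SequenceNumberAccessConditions{
            IfSequenceNumberLessThanOrEqualTo: &maxSeq,
        }
        // Only write while holding this lease -> x-ms-lease-id.
        lac := azblob.LeaseAccessConditions{LeaseID: &lease}

        fmt.Printf("%+v %+v %+v\n", mod, seq, lac)
    }
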
+func (client *pageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Response) (pageBlobClientUploadPagesFromURLResponse, error) { + result := pageBlobClientUploadPagesFromURLResponse{RawResponse: resp} + if val := resp.Header.Get("ETag"); val != "" { + result.ETag = &val + } + if val := resp.Header.Get("Last-Modified"); val != "" { + lastModified, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientUploadPagesFromURLResponse{}, err + } + result.LastModified = &lastModified + } + if val := resp.Header.Get("Content-MD5"); val != "" { + contentMD5, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return pageBlobClientUploadPagesFromURLResponse{}, err + } + result.ContentMD5 = contentMD5 + } + if val := resp.Header.Get("x-ms-content-crc64"); val != "" { + xMSContentCRC64, err := base64.StdEncoding.DecodeString(val) + if err != nil { + return pageBlobClientUploadPagesFromURLResponse{}, err + } + result.XMSContentCRC64 = xMSContentCRC64 + } + if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" { + blobSequenceNumber, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return pageBlobClientUploadPagesFromURLResponse{}, err + } + result.BlobSequenceNumber = &blobSequenceNumber + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return pageBlobClientUploadPagesFromURLResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" { + isServerEncrypted, err := strconv.ParseBool(val) + if err != nil { + return pageBlobClientUploadPagesFromURLResponse{}, err + } + result.IsServerEncrypted = &isServerEncrypted + } + if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" { + result.EncryptionKeySHA256 = &val + } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pagers.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pagers.go new file mode 100644 index 000000000..9f0cc4629 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_pagers.go @@ -0,0 +1,287 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +import ( + "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "net/http" + "reflect" +) + +// containerClientListBlobFlatSegmentPager provides operations for iterating over paged responses. +type containerClientListBlobFlatSegmentPager struct { + client *containerClient + current containerClientListBlobFlatSegmentResponse + err error + requester func(context.Context) (*policy.Request, error) + advancer func(context.Context, containerClientListBlobFlatSegmentResponse) (*policy.Request, error) +} + +// Err returns the last error encountered while paging. 
+func (p *containerClientListBlobFlatSegmentPager) Err() error { + return p.err +} + +// NextPage returns true if the pager advanced to the next page. +// Returns false if there are no more pages or an error occurred. +func (p *containerClientListBlobFlatSegmentPager) NextPage(ctx context.Context) bool { + var req *policy.Request + var err error + if !reflect.ValueOf(p.current).IsZero() { + if p.current.ListBlobsFlatSegmentResponse.NextMarker == nil || len(*p.current.ListBlobsFlatSegmentResponse.NextMarker) == 0 { + return false + } + req, err = p.advancer(ctx, p.current) + } else { + req, err = p.requester(ctx) + } + if err != nil { + p.err = err + return false + } + resp, err := p.client.pl.Do(req) + if err != nil { + p.err = err + return false + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + p.err = runtime.NewResponseError(resp) + return false + } + result, err := p.client.listBlobFlatSegmentHandleResponse(resp) + if err != nil { + p.err = err + return false + } + p.current = result + return true +} + +// PageResponse returns the current containerClientListBlobFlatSegmentResponse page. +func (p *containerClientListBlobFlatSegmentPager) PageResponse() containerClientListBlobFlatSegmentResponse { + return p.current +} + +// containerClientListBlobHierarchySegmentPager provides operations for iterating over paged responses. +type containerClientListBlobHierarchySegmentPager struct { + client *containerClient + current containerClientListBlobHierarchySegmentResponse + err error + requester func(context.Context) (*policy.Request, error) + advancer func(context.Context, containerClientListBlobHierarchySegmentResponse) (*policy.Request, error) +} + +// Err returns the last error encountered while paging. +func (p *containerClientListBlobHierarchySegmentPager) Err() error { + return p.err +} + +// NextPage returns true if the pager advanced to the next page. +// Returns false if there are no more pages or an error occurred. +func (p *containerClientListBlobHierarchySegmentPager) NextPage(ctx context.Context) bool { + var req *policy.Request + var err error + if !reflect.ValueOf(p.current).IsZero() { + if p.current.ListBlobsHierarchySegmentResponse.NextMarker == nil || len(*p.current.ListBlobsHierarchySegmentResponse.NextMarker) == 0 { + return false + } + req, err = p.advancer(ctx, p.current) + } else { + req, err = p.requester(ctx) + } + if err != nil { + p.err = err + return false + } + resp, err := p.client.pl.Do(req) + if err != nil { + p.err = err + return false + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + p.err = runtime.NewResponseError(resp) + return false + } + result, err := p.client.listBlobHierarchySegmentHandleResponse(resp) + if err != nil { + p.err = err + return false + } + p.current = result + return true +} + +// PageResponse returns the current containerClientListBlobHierarchySegmentResponse page. +func (p *containerClientListBlobHierarchySegmentPager) PageResponse() containerClientListBlobHierarchySegmentResponse { + return p.current +} + +// pageBlobClientGetPageRangesDiffPager provides operations for iterating over paged responses. +type pageBlobClientGetPageRangesDiffPager struct { + client *pageBlobClient + current pageBlobClientGetPageRangesDiffResponse + err error + requester func(context.Context) (*policy.Request, error) + advancer func(context.Context, pageBlobClientGetPageRangesDiffResponse) (*policy.Request, error) +} + +// Err returns the last error encountered while paging. 
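All of the pagers in this file share one contract: NextPage fetches the next page (following the NextMarker continuation token) and reports whether it got one, PageResponse returns the page most recently fetched, and Err exposes the terminal error once NextPage returns false. The loop below shows the intended consumption pattern; blobPager, drain and fakePager are local stand-ins declared only so the sketch compiles, since the concrete pagers here are unexported and normally reached through the package's exported List* wrappers.

    package main

    import (
        "context"
        "fmt"
    )

    // blobPager mirrors the method set shared by the generated pagers in this
    // file (NextPage / PageResponse / Err); it is declared locally only so the
    // consumption loop below compiles on its own.
    type blobPager[T any] interface {
        NextPage(ctx context.Context) bool
        PageResponse() T
        Err() error
    }

    // drain walks every page, calling visit once per page, and returns the
    // terminal error (if any) once NextPage reports there is nothing left.
    func drain[T any](ctx context.Context, p blobPager[T], visit func(T)) error {
        for p.NextPage(ctx) {
            visit(p.PageResponse())
        }
        return p.Err()
    }

    // fakePager serves a fixed number of pages so main has something to
    // iterate; a real caller would use one of the exported List* pagers.
    type fakePager struct {
        pages []string
        i     int
    }

    func (p *fakePager) NextPage(ctx context.Context) bool {
        if p.i >= len(p.pages) {
            return false
        }
        p.i++
        return true
    }
    func (p *fakePager) PageResponse() string { return p.pages[p.i-1] }
    func (p *fakePager) Err() error           { return nil }

    func main() {
        p := &fakePager{pages: []string{"page-1", "page-2"}}
        if err := drain[string](context.Background(), p, func(page string) {
            fmt.Println("got", page)
        }); err != nil {
            fmt.Println("paging failed:", err)
        }
    }
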
+func (p *pageBlobClientGetPageRangesDiffPager) Err() error { + return p.err +} + +// NextPage returns true if the pager advanced to the next page. +// Returns false if there are no more pages or an error occurred. +func (p *pageBlobClientGetPageRangesDiffPager) NextPage(ctx context.Context) bool { + var req *policy.Request + var err error + if !reflect.ValueOf(p.current).IsZero() { + if p.current.PageList.NextMarker == nil || len(*p.current.PageList.NextMarker) == 0 { + return false + } + req, err = p.advancer(ctx, p.current) + } else { + req, err = p.requester(ctx) + } + if err != nil { + p.err = err + return false + } + resp, err := p.client.pl.Do(req) + if err != nil { + p.err = err + return false + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + p.err = runtime.NewResponseError(resp) + return false + } + result, err := p.client.getPageRangesDiffHandleResponse(resp) + if err != nil { + p.err = err + return false + } + p.current = result + return true +} + +// PageResponse returns the current pageBlobClientGetPageRangesDiffResponse page. +func (p *pageBlobClientGetPageRangesDiffPager) PageResponse() pageBlobClientGetPageRangesDiffResponse { + return p.current +} + +// pageBlobClientGetPageRangesPager provides operations for iterating over paged responses. +type pageBlobClientGetPageRangesPager struct { + client *pageBlobClient + current pageBlobClientGetPageRangesResponse + err error + requester func(context.Context) (*policy.Request, error) + advancer func(context.Context, pageBlobClientGetPageRangesResponse) (*policy.Request, error) +} + +// Err returns the last error encountered while paging. +func (p *pageBlobClientGetPageRangesPager) Err() error { + return p.err +} + +// NextPage returns true if the pager advanced to the next page. +// Returns false if there are no more pages or an error occurred. +func (p *pageBlobClientGetPageRangesPager) NextPage(ctx context.Context) bool { + var req *policy.Request + var err error + if !reflect.ValueOf(p.current).IsZero() { + if p.current.PageList.NextMarker == nil || len(*p.current.PageList.NextMarker) == 0 { + return false + } + req, err = p.advancer(ctx, p.current) + } else { + req, err = p.requester(ctx) + } + if err != nil { + p.err = err + return false + } + resp, err := p.client.pl.Do(req) + if err != nil { + p.err = err + return false + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + p.err = runtime.NewResponseError(resp) + return false + } + result, err := p.client.getPageRangesHandleResponse(resp) + if err != nil { + p.err = err + return false + } + p.current = result + return true +} + +// PageResponse returns the current pageBlobClientGetPageRangesResponse page. +func (p *pageBlobClientGetPageRangesPager) PageResponse() pageBlobClientGetPageRangesResponse { + return p.current +} + +// serviceClientListContainersSegmentPager provides operations for iterating over paged responses. +type serviceClientListContainersSegmentPager struct { + client *serviceClient + current serviceClientListContainersSegmentResponse + err error + requester func(context.Context) (*policy.Request, error) + advancer func(context.Context, serviceClientListContainersSegmentResponse) (*policy.Request, error) +} + +// Err returns the last error encountered while paging. +func (p *serviceClientListContainersSegmentPager) Err() error { + return p.err +} + +// NextPage returns true if the pager advanced to the next page. +// Returns false if there are no more pages or an error occurred. 
+func (p *serviceClientListContainersSegmentPager) NextPage(ctx context.Context) bool { + var req *policy.Request + var err error + if !reflect.ValueOf(p.current).IsZero() { + if p.current.ListContainersSegmentResponse.NextMarker == nil || len(*p.current.ListContainersSegmentResponse.NextMarker) == 0 { + return false + } + req, err = p.advancer(ctx, p.current) + } else { + req, err = p.requester(ctx) + } + if err != nil { + p.err = err + return false + } + resp, err := p.client.pl.Do(req) + if err != nil { + p.err = err + return false + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + p.err = runtime.NewResponseError(resp) + return false + } + result, err := p.client.listContainersSegmentHandleResponse(resp) + if err != nil { + p.err = err + return false + } + p.current = result + return true +} + +// PageResponse returns the current serviceClientListContainersSegmentResponse page. +func (p *serviceClientListContainersSegmentPager) PageResponse() serviceClientListContainersSegmentResponse { + return p.current +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_response_types.go new file mode 100644 index 000000000..60c1c0c34 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_response_types.go @@ -0,0 +1,2434 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +import ( + "net/http" + "time" +) + +// appendBlobClientAppendBlockFromURLResponse contains the response from method appendBlobClient.AppendBlockFromURL. +type appendBlobClientAppendBlockFromURLResponse struct { + appendBlobClientAppendBlockFromURLResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// appendBlobClientAppendBlockFromURLResult contains the result from method appendBlobClient.AppendBlockFromURL. +type appendBlobClientAppendBlockFromURLResult struct { + // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response. + BlobAppendOffset *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// appendBlobClientAppendBlockResponse contains the response from method appendBlobClient.AppendBlock. +type appendBlobClientAppendBlockResponse struct { + appendBlobClientAppendBlockResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// appendBlobClientAppendBlockResult contains the result from method appendBlobClient.AppendBlock. +type appendBlobClientAppendBlockResult struct { + // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response. + BlobAppendOffset *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// appendBlobClientCreateResponse contains the response from method appendBlobClient.Create. +type appendBlobClientCreateResponse struct { + appendBlobClientCreateResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// appendBlobClientCreateResult contains the result from method appendBlobClient.Create. +type appendBlobClientCreateResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. 
+ IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// appendBlobClientSealResponse contains the response from method appendBlobClient.Seal. +type appendBlobClientSealResponse struct { + appendBlobClientSealResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// appendBlobClientSealResult contains the result from method appendBlobClient.Seal. +type appendBlobClientSealResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // IsSealed contains the information returned from the x-ms-blob-sealed header response. + IsSealed *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientAbortCopyFromURLResponse contains the response from method blobClient.AbortCopyFromURL. +type blobClientAbortCopyFromURLResponse struct { + blobClientAbortCopyFromURLResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientAbortCopyFromURLResult contains the result from method blobClient.AbortCopyFromURL. +type blobClientAbortCopyFromURLResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientAcquireLeaseResponse contains the response from method blobClient.AcquireLease. +type blobClientAcquireLeaseResponse struct { + blobClientAcquireLeaseResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientAcquireLeaseResult contains the result from method blobClient.AcquireLease. +type blobClientAcquireLeaseResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientBreakLeaseResponse contains the response from method blobClient.BreakLease. +type blobClientBreakLeaseResponse struct { + blobClientBreakLeaseResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientBreakLeaseResult contains the result from method blobClient.BreakLease. +type blobClientBreakLeaseResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseTime contains the information returned from the x-ms-lease-time header response. + LeaseTime *int32 + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientChangeLeaseResponse contains the response from method blobClient.ChangeLease. +type blobClientChangeLeaseResponse struct { + blobClientChangeLeaseResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientChangeLeaseResult contains the result from method blobClient.ChangeLease. +type blobClientChangeLeaseResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientCopyFromURLResponse contains the response from method blobClient.CopyFromURL. +type blobClientCopyFromURLResponse struct { + blobClientCopyFromURLResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientCopyFromURLResult contains the result from method blobClient.CopyFromURL. +type blobClientCopyFromURLResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. 
+ ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// blobClientCreateSnapshotResponse contains the response from method blobClient.CreateSnapshot. +type blobClientCreateSnapshotResponse struct { + blobClientCreateSnapshotResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientCreateSnapshotResult contains the result from method blobClient.CreateSnapshot. +type blobClientCreateSnapshotResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Snapshot contains the information returned from the x-ms-snapshot header response. + Snapshot *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// blobClientDeleteImmutabilityPolicyResponse contains the response from method blobClient.DeleteImmutabilityPolicy. +type blobClientDeleteImmutabilityPolicyResponse struct { + blobClientDeleteImmutabilityPolicyResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientDeleteImmutabilityPolicyResult contains the result from method blobClient.DeleteImmutabilityPolicy. +type blobClientDeleteImmutabilityPolicyResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientDeleteResponse contains the response from method blobClient.Delete. +type blobClientDeleteResponse struct { + blobClientDeleteResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientDeleteResult contains the result from method blobClient.Delete. +type blobClientDeleteResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. 
+ Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientDownloadResponse contains the response from method blobClient.Download. +type blobClientDownloadResponse struct { + blobClientDownloadResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientDownloadResult contains the result from method blobClient.Download. +type blobClientDownloadResult struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // BlobContentMD5 contains the information returned from the x-ms-blob-content-md5 header response. + BlobContentMD5 []byte + + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // BlobType contains the information returned from the x-ms-blob-type header response. + BlobType *BlobType + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentRange contains the information returned from the Content-Range header response. + ContentRange *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. 
+ EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // ErrorCode contains the information returned from the x-ms-error-code header response. + ErrorCode *string + + // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response. + ImmutabilityPolicyExpiresOn *time.Time + + // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. + ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + + // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. + IsCurrentVersion *bool + + // IsSealed contains the information returned from the x-ms-blob-sealed header response. + IsSealed *bool + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastAccessed contains the information returned from the x-ms-last-access-time header response. + LastAccessed *time.Time + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // LegalHold contains the information returned from the x-ms-legal-hold header response. + LegalHold *bool + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]string + + // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. + ObjectReplicationPolicyID *string + + // ObjectReplicationRules contains the information returned from the x-ms-or header response. + ObjectReplicationRules map[string]string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // TagCount contains the information returned from the x-ms-tag-count header response. + TagCount *int64 + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// blobClientGetAccountInfoResponse contains the response from method blobClient.GetAccountInfo. +type blobClientGetAccountInfoResponse struct { + blobClientGetAccountInfoResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientGetAccountInfoResult contains the result from method blobClient.GetAccountInfo. +type blobClientGetAccountInfoResult struct { + // AccountKind contains the information returned from the x-ms-account-kind header response. + AccountKind *AccountKind + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // SKUName contains the information returned from the x-ms-sku-name header response. 
+ SKUName *SKUName + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientGetPropertiesResponse contains the response from method blobClient.GetProperties. +type blobClientGetPropertiesResponse struct { + blobClientGetPropertiesResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientGetPropertiesResult contains the result from method blobClient.GetProperties. +type blobClientGetPropertiesResult struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // AccessTier contains the information returned from the x-ms-access-tier header response. + AccessTier *string + + // AccessTierChangeTime contains the information returned from the x-ms-access-tier-change-time header response. + AccessTierChangeTime *time.Time + + // AccessTierInferred contains the information returned from the x-ms-access-tier-inferred header response. + AccessTierInferred *bool + + // ArchiveStatus contains the information returned from the x-ms-archive-status header response. + ArchiveStatus *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // BlobType contains the information returned from the x-ms-blob-type header response. + BlobType *BlobType + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // CreationTime contains the information returned from the x-ms-creation-time header response. + CreationTime *time.Time + + // Date contains the information returned from the Date header response. 
+ Date *time.Time + + // DestinationSnapshot contains the information returned from the x-ms-copy-destination-snapshot header response. + DestinationSnapshot *string + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // ExpiresOn contains the information returned from the x-ms-expiry-time header response. + ExpiresOn *time.Time + + // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response. + ImmutabilityPolicyExpiresOn *time.Time + + // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. + ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + + // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response. + IsCurrentVersion *bool + + // IsIncrementalCopy contains the information returned from the x-ms-incremental-copy header response. + IsIncrementalCopy *bool + + // IsSealed contains the information returned from the x-ms-blob-sealed header response. + IsSealed *bool + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastAccessed contains the information returned from the x-ms-last-access-time header response. + LastAccessed *time.Time + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // LegalHold contains the information returned from the x-ms-legal-hold header response. + LegalHold *bool + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]string + + // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response. + ObjectReplicationPolicyID *string + + // ObjectReplicationRules contains the information returned from the x-ms-or header response. + ObjectReplicationRules map[string]string + + // RehydratePriority contains the information returned from the x-ms-rehydrate-priority header response. + RehydratePriority *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // TagCount contains the information returned from the x-ms-tag-count header response. + TagCount *int64 + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// blobClientGetTagsResponse contains the response from method blobClient.GetTags. +type blobClientGetTagsResponse struct { + blobClientGetTagsResult + // RawResponse contains the underlying HTTP response. 
+ RawResponse *http.Response +} + +// blobClientGetTagsResult contains the result from method blobClient.GetTags. +type blobClientGetTagsResult struct { + BlobTags + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// blobClientQueryResponse contains the response from method blobClient.Query. +type blobClientQueryResponse struct { + blobClientQueryResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientQueryResult contains the result from method blobClient.Query. +type blobClientQueryResult struct { + // AcceptRanges contains the information returned from the Accept-Ranges header response. + AcceptRanges *string + + // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response. + BlobCommittedBlockCount *int32 + + // BlobContentMD5 contains the information returned from the x-ms-blob-content-md5 header response. + BlobContentMD5 []byte + + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // BlobType contains the information returned from the x-ms-blob-type header response. + BlobType *BlobType + + // CacheControl contains the information returned from the Cache-Control header response. + CacheControl *string + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + ContentCRC64 []byte + + // ContentDisposition contains the information returned from the Content-Disposition header response. + ContentDisposition *string + + // ContentEncoding contains the information returned from the Content-Encoding header response. + ContentEncoding *string + + // ContentLanguage contains the information returned from the Content-Language header response. + ContentLanguage *string + + // ContentLength contains the information returned from the Content-Length header response. + ContentLength *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // ContentRange contains the information returned from the Content-Range header response. + ContentRange *string + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response. + CopyCompletionTime *time.Time + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyProgress contains the information returned from the x-ms-copy-progress header response. + CopyProgress *string + + // CopySource contains the information returned from the x-ms-copy-source header response. + CopySource *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. 
+ CopyStatus *CopyStatusType + + // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. + CopyStatusDescription *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientReleaseLeaseResponse contains the response from method blobClient.ReleaseLease. +type blobClientReleaseLeaseResponse struct { + blobClientReleaseLeaseResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientReleaseLeaseResult contains the result from method blobClient.ReleaseLease. +type blobClientReleaseLeaseResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientRenewLeaseResponse contains the response from method blobClient.RenewLease. +type blobClientRenewLeaseResponse struct { + blobClientRenewLeaseResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientRenewLeaseResult contains the result from method blobClient.RenewLease. +type blobClientRenewLeaseResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. 
+ LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientSetExpiryResponse contains the response from method blobClient.SetExpiry. +type blobClientSetExpiryResponse struct { + blobClientSetExpiryResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientSetExpiryResult contains the result from method blobClient.SetExpiry. +type blobClientSetExpiryResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientSetHTTPHeadersResponse contains the response from method blobClient.SetHTTPHeaders. +type blobClientSetHTTPHeadersResponse struct { + blobClientSetHTTPHeadersResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientSetHTTPHeadersResult contains the result from method blobClient.SetHTTPHeaders. +type blobClientSetHTTPHeadersResult struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientSetImmutabilityPolicyResponse contains the response from method blobClient.SetImmutabilityPolicy. +type blobClientSetImmutabilityPolicyResponse struct { + blobClientSetImmutabilityPolicyResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientSetImmutabilityPolicyResult contains the result from method blobClient.SetImmutabilityPolicy. +type blobClientSetImmutabilityPolicyResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ImmutabilityPolicyExpiry contains the information returned from the x-ms-immutability-policy-until-date header response. 
+ ImmutabilityPolicyExpiry *time.Time + + // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response. + ImmutabilityPolicyMode *BlobImmutabilityPolicyMode + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientSetLegalHoldResponse contains the response from method blobClient.SetLegalHold. +type blobClientSetLegalHoldResponse struct { + blobClientSetLegalHoldResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientSetLegalHoldResult contains the result from method blobClient.SetLegalHold. +type blobClientSetLegalHoldResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // LegalHold contains the information returned from the x-ms-legal-hold header response. + LegalHold *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientSetMetadataResponse contains the response from method blobClient.SetMetadata. +type blobClientSetMetadataResponse struct { + blobClientSetMetadataResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientSetMetadataResult contains the result from method blobClient.SetMetadata. +type blobClientSetMetadataResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// blobClientSetTagsResponse contains the response from method blobClient.SetTags. +type blobClientSetTagsResponse struct { + blobClientSetTagsResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientSetTagsResult contains the result from method blobClient.SetTags. +type blobClientSetTagsResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. 
+ Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientSetTierResponse contains the response from method blobClient.SetTier. +type blobClientSetTierResponse struct { + blobClientSetTierResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientSetTierResult contains the result from method blobClient.SetTier. +type blobClientSetTierResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blobClientStartCopyFromURLResponse contains the response from method blobClient.StartCopyFromURL. +type blobClientStartCopyFromURLResponse struct { + blobClientStartCopyFromURLResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientStartCopyFromURLResult contains the result from method blobClient.StartCopyFromURL. +type blobClientStartCopyFromURLResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// blobClientUndeleteResponse contains the response from method blobClient.Undelete. +type blobClientUndeleteResponse struct { + blobClientUndeleteResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blobClientUndeleteResult contains the result from method blobClient.Undelete. +type blobClientUndeleteResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// blockBlobClientCommitBlockListResponse contains the response from method blockBlobClient.CommitBlockList. +type blockBlobClientCommitBlockListResponse struct { + blockBlobClientCommitBlockListResult + // RawResponse contains the underlying HTTP response. 
+ RawResponse *http.Response +} + +// blockBlobClientCommitBlockListResult contains the result from method blockBlobClient.CommitBlockList. +type blockBlobClientCommitBlockListResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// blockBlobClientGetBlockListResponse contains the response from method blockBlobClient.GetBlockList. +type blockBlobClientGetBlockListResponse struct { + blockBlobClientGetBlockListResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blockBlobClientGetBlockListResult contains the result from method blockBlobClient.GetBlockList. +type blockBlobClientGetBlockListResult struct { + BlockList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. + BlobContentLength *int64 `xml:"BlobContentLength"` + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string `xml:"ContentType"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // ETag contains the information returned from the ETag header response. + ETag *string `xml:"ETag"` + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time `xml:"LastModified"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// blockBlobClientPutBlobFromURLResponse contains the response from method blockBlobClient.PutBlobFromURL. +type blockBlobClientPutBlobFromURLResponse struct { + blockBlobClientPutBlobFromURLResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blockBlobClientPutBlobFromURLResult contains the result from method blockBlobClient.PutBlobFromURL. 
+type blockBlobClientPutBlobFromURLResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// blockBlobClientStageBlockFromURLResponse contains the response from method blockBlobClient.StageBlockFromURL. +type blockBlobClientStageBlockFromURLResponse struct { + blockBlobClientStageBlockFromURLResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blockBlobClientStageBlockFromURLResult contains the result from method blockBlobClient.StageBlockFromURL. +type blockBlobClientStageBlockFromURLResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// blockBlobClientStageBlockResponse contains the response from method blockBlobClient.StageBlock. +type blockBlobClientStageBlockResponse struct { + blockBlobClientStageBlockResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blockBlobClientStageBlockResult contains the result from method blockBlobClient.StageBlock. +type blockBlobClientStageBlockResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. 
+ ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// blockBlobClientUploadResponse contains the response from method blockBlobClient.Upload. +type blockBlobClientUploadResponse struct { + blockBlobClientUploadResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// blockBlobClientUploadResult contains the result from method blockBlobClient.Upload. +type blockBlobClientUploadResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// containerClientAcquireLeaseResponse contains the response from method containerClient.AcquireLease. +type containerClientAcquireLeaseResponse struct { + containerClientAcquireLeaseResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientAcquireLeaseResult contains the result from method containerClient.AcquireLease. +type containerClientAcquireLeaseResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. 
+ LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientBreakLeaseResponse contains the response from method containerClient.BreakLease. +type containerClientBreakLeaseResponse struct { + containerClientBreakLeaseResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientBreakLeaseResult contains the result from method containerClient.BreakLease. +type containerClientBreakLeaseResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseTime contains the information returned from the x-ms-lease-time header response. + LeaseTime *int32 + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientChangeLeaseResponse contains the response from method containerClient.ChangeLease. +type containerClientChangeLeaseResponse struct { + containerClientChangeLeaseResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientChangeLeaseResult contains the result from method containerClient.ChangeLease. +type containerClientChangeLeaseResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientCreateResponse contains the response from method containerClient.Create. +type containerClientCreateResponse struct { + containerClientCreateResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientCreateResult contains the result from method containerClient.Create. +type containerClientCreateResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. 
+ RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientDeleteResponse contains the response from method containerClient.Delete. +type containerClientDeleteResponse struct { + containerClientDeleteResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientDeleteResult contains the result from method containerClient.Delete. +type containerClientDeleteResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientGetAccessPolicyResponse contains the response from method containerClient.GetAccessPolicy. +type containerClientGetAccessPolicyResponse struct { + containerClientGetAccessPolicyResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientGetAccessPolicyResult contains the result from method containerClient.GetAccessPolicy. +type containerClientGetAccessPolicyResult struct { + // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. + BlobPublicAccess *PublicAccessType `xml:"BlobPublicAccess"` + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // ETag contains the information returned from the ETag header response. + ETag *string `xml:"ETag"` + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time `xml:"LastModified"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // a collection of signed identifiers + SignedIdentifiers []*SignedIdentifier `xml:"SignedIdentifier"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// containerClientGetAccountInfoResponse contains the response from method containerClient.GetAccountInfo. +type containerClientGetAccountInfoResponse struct { + containerClientGetAccountInfoResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientGetAccountInfoResult contains the result from method containerClient.GetAccountInfo. +type containerClientGetAccountInfoResult struct { + // AccountKind contains the information returned from the x-ms-account-kind header response. + AccountKind *AccountKind + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // SKUName contains the information returned from the x-ms-sku-name header response. 
+ SKUName *SKUName + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientGetPropertiesResponse contains the response from method containerClient.GetProperties. +type containerClientGetPropertiesResponse struct { + containerClientGetPropertiesResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientGetPropertiesResult contains the result from method containerClient.GetProperties. +type containerClientGetPropertiesResult struct { + // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. + BlobPublicAccess *PublicAccessType + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // DefaultEncryptionScope contains the information returned from the x-ms-default-encryption-scope header response. + DefaultEncryptionScope *string + + // DenyEncryptionScopeOverride contains the information returned from the x-ms-deny-encryption-scope-override header response. + DenyEncryptionScopeOverride *bool + + // ETag contains the information returned from the ETag header response. + ETag *string + + // HasImmutabilityPolicy contains the information returned from the x-ms-has-immutability-policy header response. + HasImmutabilityPolicy *bool + + // HasLegalHold contains the information returned from the x-ms-has-legal-hold header response. + HasLegalHold *bool + + // IsImmutableStorageWithVersioningEnabled contains the information returned from the x-ms-immutable-storage-with-versioning-enabled + // header response. + IsImmutableStorageWithVersioningEnabled *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseDuration contains the information returned from the x-ms-lease-duration header response. + LeaseDuration *LeaseDurationType + + // LeaseState contains the information returned from the x-ms-lease-state header response. + LeaseState *LeaseStateType + + // LeaseStatus contains the information returned from the x-ms-lease-status header response. + LeaseStatus *LeaseStatusType + + // Metadata contains the information returned from the x-ms-meta header response. + Metadata map[string]string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientListBlobFlatSegmentResponse contains the response from method containerClient.ListBlobFlatSegment. +type containerClientListBlobFlatSegmentResponse struct { + containerClientListBlobFlatSegmentResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientListBlobFlatSegmentResult contains the result from method containerClient.ListBlobFlatSegment. +type containerClientListBlobFlatSegmentResult struct { + ListBlobsFlatSegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string `xml:"ContentType"` + + // Date contains the information returned from the Date header response. 
+ Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// containerClientListBlobHierarchySegmentResponse contains the response from method containerClient.ListBlobHierarchySegment. +type containerClientListBlobHierarchySegmentResponse struct { + containerClientListBlobHierarchySegmentResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientListBlobHierarchySegmentResult contains the result from method containerClient.ListBlobHierarchySegment. +type containerClientListBlobHierarchySegmentResult struct { + ListBlobsHierarchySegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // ContentType contains the information returned from the Content-Type header response. + ContentType *string `xml:"ContentType"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// containerClientReleaseLeaseResponse contains the response from method containerClient.ReleaseLease. +type containerClientReleaseLeaseResponse struct { + containerClientReleaseLeaseResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientReleaseLeaseResult contains the result from method containerClient.ReleaseLease. +type containerClientReleaseLeaseResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientRenameResponse contains the response from method containerClient.Rename. +type containerClientRenameResponse struct { + containerClientRenameResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientRenameResult contains the result from method containerClient.Rename. +type containerClientRenameResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientRenewLeaseResponse contains the response from method containerClient.RenewLease. 
+type containerClientRenewLeaseResponse struct { + containerClientRenewLeaseResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientRenewLeaseResult contains the result from method containerClient.RenewLease. +type containerClientRenewLeaseResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // LeaseID contains the information returned from the x-ms-lease-id header response. + LeaseID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientRestoreResponse contains the response from method containerClient.Restore. +type containerClientRestoreResponse struct { + containerClientRestoreResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientRestoreResult contains the result from method containerClient.Restore. +type containerClientRestoreResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientSetAccessPolicyResponse contains the response from method containerClient.SetAccessPolicy. +type containerClientSetAccessPolicyResponse struct { + containerClientSetAccessPolicyResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientSetAccessPolicyResult contains the result from method containerClient.SetAccessPolicy. +type containerClientSetAccessPolicyResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientSetMetadataResponse contains the response from method containerClient.SetMetadata. +type containerClientSetMetadataResponse struct { + containerClientSetMetadataResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientSetMetadataResult contains the result from method containerClient.SetMetadata. 
+type containerClientSetMetadataResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// containerClientSubmitBatchResponse contains the response from method containerClient.SubmitBatch. +type containerClientSubmitBatchResponse struct { + containerClientSubmitBatchResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// containerClientSubmitBatchResult contains the result from method containerClient.SubmitBatch. +type containerClientSubmitBatchResult struct { + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// pageBlobClientClearPagesResponse contains the response from method pageBlobClient.ClearPages. +type pageBlobClientClearPagesResponse struct { + pageBlobClientClearPagesResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// pageBlobClientClearPagesResult contains the result from method pageBlobClient.ClearPages. +type pageBlobClientClearPagesResult struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// pageBlobClientCopyIncrementalResponse contains the response from method pageBlobClient.CopyIncremental. +type pageBlobClientCopyIncrementalResponse struct { + pageBlobClientCopyIncrementalResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// pageBlobClientCopyIncrementalResult contains the result from method pageBlobClient.CopyIncremental. +type pageBlobClientCopyIncrementalResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. 
+ ClientRequestID *string + + // CopyID contains the information returned from the x-ms-copy-id header response. + CopyID *string + + // CopyStatus contains the information returned from the x-ms-copy-status header response. + CopyStatus *CopyStatusType + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// pageBlobClientCreateResponse contains the response from method pageBlobClient.Create. +type pageBlobClientCreateResponse struct { + pageBlobClientCreateResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// pageBlobClientCreateResult contains the result from method pageBlobClient.Create. +type pageBlobClientCreateResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // VersionID contains the information returned from the x-ms-version-id header response. + VersionID *string +} + +// pageBlobClientGetPageRangesDiffResponse contains the response from method pageBlobClient.GetPageRangesDiff. +type pageBlobClientGetPageRangesDiffResponse struct { + pageBlobClientGetPageRangesDiffResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// pageBlobClientGetPageRangesDiffResult contains the result from method pageBlobClient.GetPageRangesDiff. +type pageBlobClientGetPageRangesDiffResult struct { + PageList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. + BlobContentLength *int64 `xml:"BlobContentLength"` + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // ETag contains the information returned from the ETag header response. 
+ ETag *string `xml:"ETag"` + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time `xml:"LastModified"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// pageBlobClientGetPageRangesResponse contains the response from method pageBlobClient.GetPageRanges. +type pageBlobClientGetPageRangesResponse struct { + pageBlobClientGetPageRangesResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// pageBlobClientGetPageRangesResult contains the result from method pageBlobClient.GetPageRanges. +type pageBlobClientGetPageRangesResult struct { + PageList + // BlobContentLength contains the information returned from the x-ms-blob-content-length header response. + BlobContentLength *int64 `xml:"BlobContentLength"` + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // ETag contains the information returned from the ETag header response. + ETag *string `xml:"ETag"` + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time `xml:"LastModified"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// pageBlobClientResizeResponse contains the response from method pageBlobClient.Resize. +type pageBlobClientResizeResponse struct { + pageBlobClientResizeResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// pageBlobClientResizeResult contains the result from method pageBlobClient.Resize. +type pageBlobClientResizeResult struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// pageBlobClientUpdateSequenceNumberResponse contains the response from method pageBlobClient.UpdateSequenceNumber. +type pageBlobClientUpdateSequenceNumberResponse struct { + pageBlobClientUpdateSequenceNumberResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// pageBlobClientUpdateSequenceNumberResult contains the result from method pageBlobClient.UpdateSequenceNumber. 
+type pageBlobClientUpdateSequenceNumberResult struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// pageBlobClientUploadPagesFromURLResponse contains the response from method pageBlobClient.UploadPagesFromURL. +type pageBlobClientUploadPagesFromURLResponse struct { + pageBlobClientUploadPagesFromURLResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// pageBlobClientUploadPagesFromURLResult contains the result from method pageBlobClient.UploadPagesFromURL. +type pageBlobClientUploadPagesFromURLResult struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. + Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// pageBlobClientUploadPagesResponse contains the response from method pageBlobClient.UploadPages. +type pageBlobClientUploadPagesResponse struct { + pageBlobClientUploadPagesResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// pageBlobClientUploadPagesResult contains the result from method pageBlobClient.UploadPages. +type pageBlobClientUploadPagesResult struct { + // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response. + BlobSequenceNumber *int64 + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // ContentMD5 contains the information returned from the Content-MD5 header response. + ContentMD5 []byte + + // Date contains the information returned from the Date header response. 
+ Date *time.Time + + // ETag contains the information returned from the ETag header response. + ETag *string + + // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response. + EncryptionKeySHA256 *string + + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + + // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response. + IsServerEncrypted *bool + + // LastModified contains the information returned from the Last-Modified header response. + LastModified *time.Time + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string + + // XMSContentCRC64 contains the information returned from the x-ms-content-crc64 header response. + XMSContentCRC64 []byte +} + +// serviceClientFilterBlobsResponse contains the response from method serviceClient.FilterBlobs. +type serviceClientFilterBlobsResponse struct { + serviceClientFilterBlobsResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// serviceClientFilterBlobsResult contains the result from method serviceClient.FilterBlobs. +type serviceClientFilterBlobsResult struct { + FilterBlobSegment + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// serviceClientGetAccountInfoResponse contains the response from method serviceClient.GetAccountInfo. +type serviceClientGetAccountInfoResponse struct { + serviceClientGetAccountInfoResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// serviceClientGetAccountInfoResult contains the result from method serviceClient.GetAccountInfo. +type serviceClientGetAccountInfoResult struct { + // AccountKind contains the information returned from the x-ms-account-kind header response. + AccountKind *AccountKind + + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // Date contains the information returned from the Date header response. + Date *time.Time + + // IsHierarchicalNamespaceEnabled contains the information returned from the x-ms-is-hns-enabled header response. + IsHierarchicalNamespaceEnabled *bool + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // SKUName contains the information returned from the x-ms-sku-name header response. + SKUName *SKUName + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// serviceClientGetPropertiesResponse contains the response from method serviceClient.GetProperties. +type serviceClientGetPropertiesResponse struct { + serviceClientGetPropertiesResult + // RawResponse contains the underlying HTTP response. 
+ RawResponse *http.Response +} + +// serviceClientGetPropertiesResult contains the result from method serviceClient.GetProperties. +type serviceClientGetPropertiesResult struct { + StorageServiceProperties + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// serviceClientGetStatisticsResponse contains the response from method serviceClient.GetStatistics. +type serviceClientGetStatisticsResponse struct { + serviceClientGetStatisticsResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// serviceClientGetStatisticsResult contains the result from method serviceClient.GetStatistics. +type serviceClientGetStatisticsResult struct { + StorageServiceStats + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// serviceClientGetUserDelegationKeyResponse contains the response from method serviceClient.GetUserDelegationKey. +type serviceClientGetUserDelegationKeyResponse struct { + serviceClientGetUserDelegationKeyResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// serviceClientGetUserDelegationKeyResult contains the result from method serviceClient.GetUserDelegationKey. +type serviceClientGetUserDelegationKeyResult struct { + UserDelegationKey + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + +// serviceClientListContainersSegmentResponse contains the response from method serviceClient.ListContainersSegment. +type serviceClientListContainersSegmentResponse struct { + serviceClientListContainersSegmentResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// serviceClientListContainersSegmentResult contains the result from method serviceClient.ListContainersSegment. +type serviceClientListContainersSegmentResult struct { + ListContainersSegmentResponse + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. 
+ Version *string `xml:"Version"` +} + +// serviceClientSetPropertiesResponse contains the response from method serviceClient.SetProperties. +type serviceClientSetPropertiesResponse struct { + serviceClientSetPropertiesResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// serviceClientSetPropertiesResult contains the result from method serviceClient.SetProperties. +type serviceClientSetPropertiesResult struct { + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} + +// serviceClientSubmitBatchResponse contains the response from method serviceClient.SubmitBatch. +type serviceClientSubmitBatchResponse struct { + serviceClientSubmitBatchResult + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// serviceClientSubmitBatchResult contains the result from method serviceClient.SubmitBatch. +type serviceClientSubmitBatchResult struct { + // ContentType contains the information returned from the Content-Type header response. + ContentType *string + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string + + // Version contains the information returned from the x-ms-version header response. + Version *string +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_service_client.go new file mode 100644 index 000000000..7dcf6ef13 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_service_client.go @@ -0,0 +1,551 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +import ( + "context" + "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +type serviceClient struct { + endpoint string + pl runtime.Pipeline +} + +// newServiceClient creates a new instance of serviceClient with the specified values. +// endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// pl - the pipeline used for sending requests and handling responses. +func newServiceClient(endpoint string, pl runtime.Pipeline) *serviceClient { + client := &serviceClient{ + endpoint: endpoint, + pl: pl, + } + return client +} + +// FilterBlobs - The Filter Blobs operation enables callers to list blobs across all containers whose tags match a given search +// expression. Filter blobs searches across all containers within a storage account but can +// be scoped within the expression to a single container. +// If the operation fails it returns an *azcore.ResponseError type. +// options - serviceClientFilterBlobsOptions contains the optional parameters for the serviceClient.FilterBlobs method. 
+func (client *serviceClient) FilterBlobs(ctx context.Context, options *serviceClientFilterBlobsOptions) (serviceClientFilterBlobsResponse, error) { + req, err := client.filterBlobsCreateRequest(ctx, options) + if err != nil { + return serviceClientFilterBlobsResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return serviceClientFilterBlobsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return serviceClientFilterBlobsResponse{}, runtime.NewResponseError(resp) + } + return client.filterBlobsHandleResponse(resp) +} + +// filterBlobsCreateRequest creates the FilterBlobs request. +func (client *serviceClient) filterBlobsCreateRequest(ctx context.Context, options *serviceClientFilterBlobsOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "blobs") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + if options != nil && options.Where != nil { + reqQP.Set("where", *options.Where) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// filterBlobsHandleResponse handles the FilterBlobs response. +func (client *serviceClient) filterBlobsHandleResponse(resp *http.Response) (serviceClientFilterBlobsResponse, error) { + result := serviceClientFilterBlobsResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return serviceClientFilterBlobsResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil { + return serviceClientFilterBlobsResponse{}, err + } + return result, nil +} + +// GetAccountInfo - Returns the sku name and account kind +// If the operation fails it returns an *azcore.ResponseError type. +// options - serviceClientGetAccountInfoOptions contains the optional parameters for the serviceClient.GetAccountInfo method. +func (client *serviceClient) GetAccountInfo(ctx context.Context, options *serviceClientGetAccountInfoOptions) (serviceClientGetAccountInfoResponse, error) { + req, err := client.getAccountInfoCreateRequest(ctx, options) + if err != nil { + return serviceClientGetAccountInfoResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return serviceClientGetAccountInfoResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return serviceClientGetAccountInfoResponse{}, runtime.NewResponseError(resp) + } + return client.getAccountInfoHandleResponse(resp) +} + +// getAccountInfoCreateRequest creates the GetAccountInfo request. 
+func (client *serviceClient) getAccountInfoCreateRequest(ctx context.Context, options *serviceClientGetAccountInfoOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "account") + reqQP.Set("comp", "properties") + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getAccountInfoHandleResponse handles the GetAccountInfo response. +func (client *serviceClient) getAccountInfoHandleResponse(resp *http.Response) (serviceClientGetAccountInfoResponse, error) { + result := serviceClientGetAccountInfoResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return serviceClientGetAccountInfoResponse{}, err + } + result.Date = &date + } + if val := resp.Header.Get("x-ms-sku-name"); val != "" { + result.SKUName = (*SKUName)(&val) + } + if val := resp.Header.Get("x-ms-account-kind"); val != "" { + result.AccountKind = (*AccountKind)(&val) + } + if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" { + isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val) + if err != nil { + return serviceClientGetAccountInfoResponse{}, err + } + result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled + } + return result, nil +} + +// GetProperties - gets the properties of a storage account's Blob service, including properties for Storage Analytics and +// CORS (Cross-Origin Resource Sharing) rules. +// If the operation fails it returns an *azcore.ResponseError type. +// options - serviceClientGetPropertiesOptions contains the optional parameters for the serviceClient.GetProperties method. +func (client *serviceClient) GetProperties(ctx context.Context, options *serviceClientGetPropertiesOptions) (serviceClientGetPropertiesResponse, error) { + req, err := client.getPropertiesCreateRequest(ctx, options) + if err != nil { + return serviceClientGetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return serviceClientGetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return serviceClientGetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.getPropertiesHandleResponse(resp) +} + +// getPropertiesCreateRequest creates the GetProperties request. 
+func (client *serviceClient) getPropertiesCreateRequest(ctx context.Context, options *serviceClientGetPropertiesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "service") + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getPropertiesHandleResponse handles the GetProperties response. +func (client *serviceClient) getPropertiesHandleResponse(resp *http.Response) (serviceClientGetPropertiesResponse, error) { + result := serviceClientGetPropertiesResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceProperties); err != nil { + return serviceClientGetPropertiesResponse{}, err + } + return result, nil +} + +// GetStatistics - Retrieves statistics related to replication for the Blob service. It is only available on the secondary +// location endpoint when read-access geo-redundant replication is enabled for the storage account. +// If the operation fails it returns an *azcore.ResponseError type. +// options - serviceClientGetStatisticsOptions contains the optional parameters for the serviceClient.GetStatistics method. +func (client *serviceClient) GetStatistics(ctx context.Context, options *serviceClientGetStatisticsOptions) (serviceClientGetStatisticsResponse, error) { + req, err := client.getStatisticsCreateRequest(ctx, options) + if err != nil { + return serviceClientGetStatisticsResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return serviceClientGetStatisticsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return serviceClientGetStatisticsResponse{}, runtime.NewResponseError(resp) + } + return client.getStatisticsHandleResponse(resp) +} + +// getStatisticsCreateRequest creates the GetStatistics request. +func (client *serviceClient) getStatisticsCreateRequest(ctx context.Context, options *serviceClientGetStatisticsOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "service") + reqQP.Set("comp", "stats") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// getStatisticsHandleResponse handles the GetStatistics response. 
+func (client *serviceClient) getStatisticsHandleResponse(resp *http.Response) (serviceClientGetStatisticsResponse, error) { + result := serviceClientGetStatisticsResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return serviceClientGetStatisticsResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceStats); err != nil { + return serviceClientGetStatisticsResponse{}, err + } + return result, nil +} + +// GetUserDelegationKey - Retrieves a user delegation key for the Blob service. This is only a valid operation when using +// bearer token authentication. +// If the operation fails it returns an *azcore.ResponseError type. +// keyInfo - Key information +// options - serviceClientGetUserDelegationKeyOptions contains the optional parameters for the serviceClient.GetUserDelegationKey +// method. +func (client *serviceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, options *serviceClientGetUserDelegationKeyOptions) (serviceClientGetUserDelegationKeyResponse, error) { + req, err := client.getUserDelegationKeyCreateRequest(ctx, keyInfo, options) + if err != nil { + return serviceClientGetUserDelegationKeyResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return serviceClientGetUserDelegationKeyResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return serviceClientGetUserDelegationKeyResponse{}, runtime.NewResponseError(resp) + } + return client.getUserDelegationKeyHandleResponse(resp) +} + +// getUserDelegationKeyCreateRequest creates the GetUserDelegationKey request. +func (client *serviceClient) getUserDelegationKeyCreateRequest(ctx context.Context, keyInfo KeyInfo, options *serviceClientGetUserDelegationKeyOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "service") + reqQP.Set("comp", "userdelegationkey") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, runtime.MarshalAsXML(req, keyInfo) +} + +// getUserDelegationKeyHandleResponse handles the GetUserDelegationKey response. 
+func (client *serviceClient) getUserDelegationKeyHandleResponse(resp *http.Response) (serviceClientGetUserDelegationKeyResponse, error) { + result := serviceClientGetUserDelegationKeyResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return serviceClientGetUserDelegationKeyResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.UserDelegationKey); err != nil { + return serviceClientGetUserDelegationKeyResponse{}, err + } + return result, nil +} + +// ListContainersSegment - The List Containers Segment operation returns a list of the containers under the specified account +// If the operation fails it returns an *azcore.ResponseError type. +// options - serviceClientListContainersSegmentOptions contains the optional parameters for the serviceClient.ListContainersSegment +// method. +func (client *serviceClient) ListContainersSegment(options *serviceClientListContainersSegmentOptions) *serviceClientListContainersSegmentPager { + return &serviceClientListContainersSegmentPager{ + client: client, + requester: func(ctx context.Context) (*policy.Request, error) { + return client.listContainersSegmentCreateRequest(ctx, options) + }, + advancer: func(ctx context.Context, resp serviceClientListContainersSegmentResponse) (*policy.Request, error) { + return runtime.NewRequest(ctx, http.MethodGet, *resp.ListContainersSegmentResponse.NextMarker) + }, + } +} + +// listContainersSegmentCreateRequest creates the ListContainersSegment request. +func (client *serviceClient) listContainersSegmentCreateRequest(ctx context.Context, options *serviceClientListContainersSegmentOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "list") + if options != nil && options.Prefix != nil { + reqQP.Set("prefix", *options.Prefix) + } + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, nil +} + +// listContainersSegmentHandleResponse handles the ListContainersSegment response. 
+func (client *serviceClient) listContainersSegmentHandleResponse(resp *http.Response) (serviceClientListContainersSegmentResponse, error) { + result := serviceClientListContainersSegmentResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if err := runtime.UnmarshalAsXML(resp, &result.ListContainersSegmentResponse); err != nil { + return serviceClientListContainersSegmentResponse{}, err + } + return result, nil +} + +// SetProperties - Sets properties for a storage account's Blob service endpoint, including properties for Storage Analytics +// and CORS (Cross-Origin Resource Sharing) rules +// If the operation fails it returns an *azcore.ResponseError type. +// storageServiceProperties - The StorageService properties. +// options - serviceClientSetPropertiesOptions contains the optional parameters for the serviceClient.SetProperties method. +func (client *serviceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *serviceClientSetPropertiesOptions) (serviceClientSetPropertiesResponse, error) { + req, err := client.setPropertiesCreateRequest(ctx, storageServiceProperties, options) + if err != nil { + return serviceClientSetPropertiesResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return serviceClientSetPropertiesResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusAccepted) { + return serviceClientSetPropertiesResponse{}, runtime.NewResponseError(resp) + } + return client.setPropertiesHandleResponse(resp) +} + +// setPropertiesCreateRequest creates the SetProperties request. +func (client *serviceClient) setPropertiesCreateRequest(ctx context.Context, storageServiceProperties StorageServiceProperties, options *serviceClientSetPropertiesOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "service") + reqQP.Set("comp", "properties") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, runtime.MarshalAsXML(req, storageServiceProperties) +} + +// setPropertiesHandleResponse handles the SetProperties response. +func (client *serviceClient) setPropertiesHandleResponse(resp *http.Response) (serviceClientSetPropertiesResponse, error) { + result := serviceClientSetPropertiesResponse{RawResponse: resp} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} + +// SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. +// If the operation fails it returns an *azcore.ResponseError type. +// contentLength - The length of the request. +// multipartContentType - Required. 
The value of this header must be multipart/mixed with a batch boundary. Example header +// value: multipart/mixed; boundary=batch_ +// body - Initial data +// options - serviceClientSubmitBatchOptions contains the optional parameters for the serviceClient.SubmitBatch method. +func (client *serviceClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *serviceClientSubmitBatchOptions) (serviceClientSubmitBatchResponse, error) { + req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options) + if err != nil { + return serviceClientSubmitBatchResponse{}, err + } + resp, err := client.pl.Do(req) + if err != nil { + return serviceClientSubmitBatchResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return serviceClientSubmitBatchResponse{}, runtime.NewResponseError(resp) + } + return client.submitBatchHandleResponse(resp) +} + +// submitBatchCreateRequest creates the SubmitBatch request. +func (client *serviceClient) submitBatchCreateRequest(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *serviceClientSubmitBatchOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodPost, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("comp", "batch") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + req.Raw().URL.RawQuery = reqQP.Encode() + runtime.SkipBodyDownload(req) + req.Raw().Header.Set("Content-Length", strconv.FormatInt(contentLength, 10)) + req.Raw().Header.Set("Content-Type", multipartContentType) + req.Raw().Header.Set("x-ms-version", "2020-10-02") + if options != nil && options.RequestID != nil { + req.Raw().Header.Set("x-ms-client-request-id", *options.RequestID) + } + req.Raw().Header.Set("Accept", "application/xml") + return req, runtime.MarshalAsXML(req, body) +} + +// submitBatchHandleResponse handles the SubmitBatch response. +func (client *serviceClient) submitBatchHandleResponse(resp *http.Response) (serviceClientSubmitBatchResponse, error) { + result := serviceClientSubmitBatchResponse{RawResponse: resp} + if val := resp.Header.Get("Content-Type"); val != "" { + result.ContentType = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + return result, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc1123.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc1123.go new file mode 100644 index 000000000..42726159b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc1123.go @@ -0,0 +1,42 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+ +package azblob + +import ( + "strings" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` +) + +type timeRFC1123 time.Time + +func (t timeRFC1123) MarshalJSON() ([]byte, error) { + b := []byte(time.Time(t).Format(rfc1123JSON)) + return b, nil +} + +func (t timeRFC1123) MarshalText() ([]byte, error) { + b := []byte(time.Time(t).Format(time.RFC1123)) + return b, nil +} + +func (t *timeRFC1123) UnmarshalJSON(data []byte) error { + p, err := time.Parse(rfc1123JSON, strings.ToUpper(string(data))) + *t = timeRFC1123(p) + return err +} + +func (t *timeRFC1123) UnmarshalText(data []byte) error { + p, err := time.Parse(time.RFC1123, string(data)) + *t = timeRFC1123(p) + return err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc3339.go new file mode 100644 index 000000000..c51d8d78c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_time_rfc3339.go @@ -0,0 +1,58 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +import ( + "regexp" + "strings" + "time" +) + +const ( + utcLayoutJSON = `"2006-01-02T15:04:05.999999999"` + utcLayout = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`) + +type timeRFC3339 time.Time + +func (t timeRFC3339) MarshalJSON() (json []byte, err error) { + tt := time.Time(t) + return tt.MarshalJSON() +} + +func (t timeRFC3339) MarshalText() (text []byte, err error) { + tt := time.Time(t) + return tt.MarshalText() +} + +func (t *timeRFC3339) UnmarshalJSON(data []byte) error { + layout := utcLayoutJSON + if tzOffsetRegex.Match(data) { + layout = rfc3339JSON + } + return t.Parse(layout, string(data)) +} + +func (t *timeRFC3339) UnmarshalText(data []byte) (err error) { + layout := utcLayout + if tzOffsetRegex.Match(data) { + layout = time.RFC3339Nano + } + return t.Parse(layout, string(data)) +} + +func (t *timeRFC3339) Parse(layout, value string) error { + p, err := time.Parse(layout, strings.ToUpper(value)) + *t = timeRFC3339(p) + return err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_xml_helper.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_xml_helper.go new file mode 100644 index 000000000..1cf97387d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/zz_generated_xml_helper.go @@ -0,0 +1,40 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +package azblob + +import ( + "encoding/xml" + "strings" +) + +type additionalProperties map[string]*string + +// UnmarshalXML implements the xml.Unmarshaler interface for additionalProperties. 
+func (ap *additionalProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + tokName := "" + for t, err := d.Token(); err == nil; t, err = d.Token() { + switch tt := t.(type) { + case xml.StartElement: + tokName = strings.ToLower(tt.Name.Local) + break + case xml.CharData: + if tokName == "" { + continue + } + if *ap == nil { + *ap = additionalProperties{} + } + s := string(tt) + (*ap)[tokName] = &s + tokName = "" + break + } + } + return nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 867943c77..bec9bbb9b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -16,6 +16,34 @@ cloud.google.com/go/storage cloud.google.com/go/storage/internal cloud.google.com/go/storage/internal/apiv2 cloud.google.com/go/storage/internal/apiv2/stubs +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.0.0 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/azcore +github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/async +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/body +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/loc +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op +github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared +github.com/Azure/azure-sdk-for-go/sdk/azcore/log +github.com/Azure/azure-sdk-for-go/sdk/azcore/policy +github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime +github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming +github.com/Azure/azure-sdk-for-go/sdk/azcore/to +# github.com/Azure/azure-sdk-for-go/sdk/internal v1.0.0 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/internal/diag +github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo +github.com/Azure/azure-sdk-for-go/sdk/internal/log +github.com/Azure/azure-sdk-for-go/sdk/internal/temporal +github.com/Azure/azure-sdk-for-go/sdk/internal/uuid +# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.4.1 +## explicit; go 1.18 +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal # github.com/VictoriaMetrics/fastcache v1.12.0 ## explicit; go 1.13 github.com/VictoriaMetrics/fastcache
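The generated client code vendored above follows one mechanical pattern throughout: each *HandleResponse helper copies selected x-ms-* response headers into pointer fields of a *Result struct (so absent headers stay nil), parses the Date header with the RFC 1123 layout, and unmarshals any XML body into the embedded payload type. The stdlib-only sketch below restates that pattern outside the SDK; the exampleResult type and handleExampleResponse function are illustrative names only and are not part of the vendored azblob package.

// A minimal, stdlib-only sketch of the header-mapping pattern used by the
// generated *HandleResponse helpers above. Names are illustrative, not SDK API.
package main

import (
	"fmt"
	"net/http"
	"time"
)

// exampleResult mirrors the shape of the generated *Result structs: every
// header value sits behind a pointer so that "header absent" and
// "header present but empty" can be told apart.
type exampleResult struct {
	ClientRequestID *string
	RequestID       *string
	Version         *string
	Date            *time.Time
}

// handleExampleResponse copies selected x-ms-* headers into exampleResult and
// parses the Date header with the RFC 1123 layout, following the same shape
// as the generated response handlers.
func handleExampleResponse(resp *http.Response) (exampleResult, error) {
	var result exampleResult
	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
		result.ClientRequestID = &val
	}
	if val := resp.Header.Get("x-ms-request-id"); val != "" {
		result.RequestID = &val
	}
	if val := resp.Header.Get("x-ms-version"); val != "" {
		result.Version = &val
	}
	if val := resp.Header.Get("Date"); val != "" {
		date, err := time.Parse(time.RFC1123, val)
		if err != nil {
			return exampleResult{}, err
		}
		result.Date = &date
	}
	return result, nil
}

func main() {
	// Fabricated response purely for demonstration.
	resp := &http.Response{Header: http.Header{}}
	resp.Header.Set("x-ms-request-id", "00000000-0000-0000-0000-000000000000")
	resp.Header.Set("Date", "Fri, 07 Oct 2022 12:00:00 GMT")
	result, err := handleExampleResponse(resp)
	if err != nil {
		panic(err)
	}
	fmt.Println(*result.RequestID, result.Date.UTC())
}

Keeping each header behind a pointer is what lets callers distinguish "header not returned" from "header returned empty", which is why the generated structs use *string and *time.Time rather than plain values.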