From 7c737846a60b39d1502d848779ec7f1c387bcf47 Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin <valyala@victoriametrics.com>
Date: Tue, 16 Jan 2024 22:48:46 +0200
Subject: [PATCH] vendor: run `make vendor-update`

---
 go.mod                                        |   83 +-
 go.sum                                        |  176 +-
 .../go/internal/.repo-metadata-full.json      |   80 +
 .../go/internal/trace/trace.go                |   41 +-
 vendor/cloud.google.com/go/storage/CHANGES.md |   13 +
 vendor/cloud.google.com/go/storage/bucket.go  |   51 +-
 vendor/cloud.google.com/go/storage/client.go  |   13 +-
 .../go/storage/grpc_client.go                 |   86 +-
 .../go/storage/http_client.go                 |   31 +-
 .../go/storage/internal/version.go            |    2 +-
 vendor/cloud.google.com/go/storage/storage.go |  154 +-
 .../sdk/storage/azblob/CHANGELOG.md           |   19 +
 .../sdk/storage/azblob/appendblob/client.go   |    7 +
 .../sdk/storage/azblob/assets.json            |    2 +-
 .../sdk/storage/azblob/blob/client.go         |   20 +-
 .../sdk/storage/azblob/blob/models.go         |    2 +-
 .../storage/azblob/bloberror/error_codes.go   |    2 +
 .../storage/azblob/blockblob/chunkwriting.go  |    4 +-
 .../sdk/storage/azblob/blockblob/client.go    |    9 +-
 .../sdk/storage/azblob/ci.yml                 |    1 +
 .../sdk/storage/azblob/common.go              |    2 +-
 .../storage/azblob/internal/base/clients.go   |    5 +-
 .../azblob/internal/exported/blob_batch.go    |    2 +-
 .../azblob/internal/exported/exported.go      |    2 +-
 .../azblob/internal/exported/version.go       |    2 +-
 .../azblob/internal/generated/autorest.md     |   10 +-
 .../generated/zz_appendblob_client.go         |  303 +-
 .../internal/generated/zz_blob_client.go      | 1633 +++++------
 .../internal/generated/zz_blockblob_client.go |  389 +--
 .../azblob/internal/generated/zz_constants.go |    3 +-
 .../internal/generated/zz_container_client.go |  572 ++--
 .../azblob/internal/generated/zz_models.go    | 1233 +--------
 .../internal/generated/zz_models_serde.go     |   97 +-
 .../azblob/internal/generated/zz_options.go   | 1469 ++++++++++
 .../internal/generated/zz_pageblob_client.go  |  516 ++--
 .../internal/generated/zz_response_types.go   |  156 +-
 .../internal/generated/zz_service_client.go   |  140 +-
 .../internal/generated/zz_time_rfc1123.go     |   23 +-
 .../internal/generated/zz_time_rfc3339.go     |   35 +-
 .../internal/generated/zz_xml_helper.go       |   26 +-
 .../storage/azblob/internal/shared/shared.go  |   27 +-
 .../sdk/storage/azblob/pageblob/client.go     |    7 +
 .../internal/base/internal/storage/storage.go |   90 +-
 .../apps/public/public.go                     |    2 +-
 .../aws/aws-sdk-go-v2/aws/config.go           |    3 +-
 .../aws-sdk-go-v2/aws/go_module_metadata.go   |    2 +-
 .../aws/aws-sdk-go-v2/aws/retry/middleware.go |    6 +-
 .../aws/aws-sdk-go-v2/config/CHANGELOG.md     |   12 +
 .../config/go_module_metadata.go              |    2 +-
 .../aws-sdk-go-v2/credentials/CHANGELOG.md    |   12 +
 .../endpointcreds/internal/client/auth.go     |   48 +
 .../endpointcreds/internal/client/client.go   |    1 +
 .../internal/client/endpoints.go              |   20 +
 .../internal/client/middleware.go             |   16 +
 .../credentials/go_module_metadata.go         |    2 +-
 .../feature/ec2/imds/CHANGELOG.md             |    4 +
 .../feature/ec2/imds/api_op_GetDynamicData.go |    1 +
 .../feature/ec2/imds/api_op_GetIAMInfo.go     |    1 +
 .../api_op_GetInstanceIdentityDocument.go     |    1 +
 .../feature/ec2/imds/api_op_GetMetadata.go    |    1 +
 .../feature/ec2/imds/api_op_GetRegion.go      |    1 +
 .../feature/ec2/imds/api_op_GetToken.go       |    1 +
 .../feature/ec2/imds/api_op_GetUserData.go    |    1 +
 .../aws-sdk-go-v2/feature/ec2/imds/auth.go    |   48 +
 .../feature/ec2/imds/endpoints.go             |   20 +
 .../feature/ec2/imds/go_module_metadata.go    |    2 +-
 .../feature/ec2/imds/request_middleware.go    |   24 +-
 .../feature/s3/manager/CHANGELOG.md           |   20 +
 .../feature/s3/manager/go_module_metadata.go  |    2 +-
 .../internal/configsources/CHANGELOG.md       |    4 +
 .../configsources/go_module_metadata.go       |    2 +-
 .../endpoints/awsrulesfn/partitions.json      |    3 +
 .../internal/endpoints/v2/CHANGELOG.md        |    4 +
 .../endpoints/v2/go_module_metadata.go        |    2 +-
 .../aws-sdk-go-v2/internal/v4a/CHANGELOG.md   |    4 +
 .../internal/v4a/go_module_metadata.go        |    2 +-
 .../service/internal/checksum/CHANGELOG.md    |    4 +
 .../internal/checksum/go_module_metadata.go   |    2 +-
 .../internal/checksum/middleware_add.go       |   13 +
 .../middleware_compute_input_checksum.go      |   15 +-
 .../internal/presigned-url/CHANGELOG.md       |    4 +
 .../presigned-url/go_module_metadata.go       |    2 +-
 .../service/internal/s3shared/CHANGELOG.md    |    4 +
 .../internal/s3shared/go_module_metadata.go   |    2 +-
 .../aws/aws-sdk-go-v2/service/s3/CHANGELOG.md |   16 +
 .../aws-sdk-go-v2/service/s3/api_client.go    |    6 +
 .../aws/aws-sdk-go-v2/service/s3/auth.go      |   10 +-
 .../service/s3/go_module_metadata.go          |    2 +-
 .../s3/internal/endpoints/endpoints.go        |   97 +
 .../aws-sdk-go-v2/service/sso/CHANGELOG.md    |    4 +
 .../service/sso/go_module_metadata.go         |    2 +-
 .../service/ssooidc/CHANGELOG.md              |    8 +
 .../service/ssooidc/go_module_metadata.go     |    2 +-
 .../ssooidc/internal/endpoints/endpoints.go   |    8 +
 .../aws-sdk-go-v2/service/sts/CHANGELOG.md    |    8 +
 .../aws-sdk-go-v2/service/sts/api_client.go   |    6 +
 .../service/sts/go_module_metadata.go         |    2 +-
 .../sts/internal/endpoints/endpoints.go       |    3 +
 .../github.com/aws/aws-sdk-go/aws/config.go   |   11 +
 .../aws/ec2metadata/token_provider.go         |    5 +-
 .../aws/aws-sdk-go/aws/endpoints/defaults.go  | 1210 ++++++++
 .../github.com/aws/aws-sdk-go/aws/version.go  |    2 +-
 vendor/github.com/go-logr/logr/README.md      |   67 +-
 vendor/github.com/go-logr/logr/context.go     |   33 +
 .../github.com/go-logr/logr/context_noslog.go |   49 +
 .../github.com/go-logr/logr/context_slog.go   |   83 +
 vendor/github.com/go-logr/logr/funcr/funcr.go |  189 +-
 .../github.com/go-logr/logr/funcr/slogsink.go |  105 +
 vendor/github.com/go-logr/logr/logr.go        |   43 -
 vendor/github.com/go-logr/logr/sloghandler.go |  192 ++
 vendor/github.com/go-logr/logr/slogr.go       |  100 +
 vendor/github.com/go-logr/logr/slogsink.go    |  120 +
 .../influxdata/influxdb/models/points.go      |   10 +-
 .../golang_protobuf_extensions/v2/LICENSE     |  201 --
 .../golang_protobuf_extensions/v2/NOTICE      |    1 -
 .../v2/pbutil/.gitignore                      |    1 -
 .../v2/pbutil/Makefile                        |    7 -
 .../v2/pbutil/decode.go                       |   81 -
 .../v2/pbutil/encode.go                       |   49 -
 .../client_golang/prometheus/histogram.go     |   56 +-
 .../client_golang/prometheus/labels.go        |    2 +
 .../prometheus/process_collector_other.go     |    4 +-
 .../prometheus/process_collector_wasip1.go}   |   20 +-
 .../prometheus/testutil/promlint/problem.go   |   33 +
 .../prometheus/testutil/promlint/promlint.go  |  316 +--
 .../testutil/promlint/validation.go           |   33 +
 .../validations/counter_validations.go        |   40 +
 .../validations/generic_name_validations.go   |  101 +
 .../promlint/validations/help_validations.go  |   32 +
 .../validations/histogram_validations.go      |   63 +
 .../testutil/promlint/validations/units.go    |  118 +
 .../prometheus/testutil/testutil.go           |   15 +
 .../prometheus/common/config/http_config.go   |   22 +-
 .../prometheus/common/expfmt/decode.go        |    9 +-
 .../prometheus/common/expfmt/encode.go        |    7 +-
 .../prometheus/common/expfmt/text_parse.go    |    8 +-
 .../prometheus/common/model/alert.go          |    4 +-
 .../prometheus/common/model/metadata.go       |   28 +
 .../prometheus/common/model/metric.go         |   10 +-
 .../prometheus/common/model/signature.go      |    6 +-
 .../prometheus/common/model/silence.go        |    2 +-
 .../prometheus/common/model/value.go          |   16 +-
 .../prometheus/common/model/value_float.go    |   14 +-
 .../prometheus/common/version/info.go         |   12 +-
 .../prometheus/common/version/info_default.go |    2 +-
 .../prometheus/common/version/info_go118.go   |   10 +-
 .../github.com/urfave/cli/v2/.golangci.yaml   |    4 +
 vendor/github.com/urfave/cli/v2/app.go        |    9 +-
 vendor/github.com/urfave/cli/v2/command.go    |    2 +
 vendor/github.com/urfave/cli/v2/context.go    |    2 +-
 .../urfave/cli/v2/flag_uint64_slice.go        |    9 +
 .../urfave/cli/v2/flag_uint_slice.go          |    9 +
 .../urfave/cli/v2/godoc-current.txt           |   20 +-
 vendor/github.com/urfave/cli/v2/help.go       |    2 +-
 .../github.com/urfave/cli/v2/suggestions.go   |    8 +
 vendor/github.com/urfave/cli/v2/template.go   |    6 +-
 vendor/github.com/xrash/smetrics/soundex.go   |   58 +-
 .../collector/pdata/pcommon/value.go          |   26 +-
 .../x/crypto/internal/poly1305/bits_compat.go |   39 -
 .../x/crypto/internal/poly1305/bits_go1.13.go |   21 -
 .../x/crypto/internal/poly1305/sum_generic.go |   43 +-
 vendor/golang.org/x/oauth2/google/default.go  |   72 +-
 vendor/golang.org/x/sync/errgroup/errgroup.go |    3 +
 vendor/golang.org/x/sys/unix/mkerrors.sh      |   37 +-
 vendor/golang.org/x/sys/unix/zerrors_linux.go |   54 +
 .../x/sys/unix/zsyscall_openbsd_386.go        |    2 -
 .../x/sys/unix/zsyscall_openbsd_amd64.go      |    2 -
 .../x/sys/unix/zsyscall_openbsd_arm.go        |    2 -
 .../x/sys/unix/zsyscall_openbsd_arm64.go      |    2 -
 .../x/sys/unix/zsyscall_openbsd_mips64.go     |    2 -
 .../x/sys/unix/zsyscall_openbsd_ppc64.go      |    2 -
 .../x/sys/unix/zsyscall_openbsd_riscv64.go    |    2 -
 .../x/sys/windows/syscall_windows.go          |    1 +
 .../x/sys/windows/zsyscall_windows.go         |    9 +
 .../iamcredentials/v1/iamcredentials-gen.go   |    6 +-
 .../api/internal/settings.go                  |    1 +
 .../google.golang.org/api/internal/version.go |    2 +-
 .../option/internaloption/internaloption.go   |   19 +
 .../api/storage/v1/storage-api.json           |  356 ++-
 .../api/storage/v1/storage-gen.go             | 1287 ++++++++-
 .../api/transport/grpc/dial.go                |  135 +-
 vendor/google.golang.org/grpc/server.go       |   22 +-
 vendor/google.golang.org/grpc/version.go      |    2 +-
 .../encoding/protodelim/protodelim.go         |  160 ++
 .../protobuf/encoding/protojson/decode.go     |   38 +-
 .../protobuf/encoding/protojson/doc.go        |    2 +-
 .../protobuf/encoding/protojson/encode.go     |   39 +-
 .../encoding/protojson/well_known_types.go    |   55 +-
 .../protobuf/encoding/prototext/decode.go     |    8 +-
 .../protobuf/encoding/prototext/encode.go     |    4 +-
 .../protobuf/encoding/protowire/wire.go       |   28 +-
 .../protobuf/internal/descfmt/stringer.go     |  183 +-
 .../protobuf/internal/filedesc/desc.go        |   47 +-
 .../protobuf/internal/genid/descriptor_gen.go |  212 +-
 .../protobuf/internal/impl/codec_gen.go       |  113 +-
 .../protobuf/internal/impl/legacy_message.go  |   19 +-
 .../protobuf/internal/impl/message.go         |   17 +-
 .../protobuf/internal/impl/pointer_reflect.go |   36 +
 .../protobuf/internal/impl/pointer_unsafe.go  |   40 +
 ...ings_unsafe.go => strings_unsafe_go120.go} |    4 +-
 .../internal/strs/strings_unsafe_go121.go     |   74 +
 .../protobuf/internal/version/version.go      |    2 +-
 .../protobuf/proto/decode.go                  |    2 +-
 .../google.golang.org/protobuf/proto/doc.go   |   58 +-
 .../protobuf/proto/encode.go                  |    2 +-
 .../protobuf/proto/extension.go               |    2 +-
 .../google.golang.org/protobuf/proto/merge.go |    2 +-
 .../google.golang.org/protobuf/proto/proto.go |   18 +-
 .../protobuf/reflect/protodesc/desc.go        |   29 +-
 .../protobuf/reflect/protodesc/desc_init.go   |   24 +
 .../protobuf/reflect/protodesc/editions.go    |  177 ++
 .../reflect/protodesc/editions_defaults.binpb |    4 +
 .../protobuf/reflect/protodesc/proto.go       |   18 +-
 .../protobuf/reflect/protoreflect/proto.go    |   83 +-
 .../reflect/protoreflect/source_gen.go        |   62 +-
 .../protobuf/reflect/protoreflect/type.go     |   44 +-
 .../protobuf/reflect/protoreflect/value.go    |   24 +-
 .../reflect/protoreflect/value_equal.go       |    8 +-
 .../reflect/protoreflect/value_union.go       |   44 +-
 ...{value_unsafe.go => value_unsafe_go120.go} |    4 +-
 .../protoreflect/value_unsafe_go121.go        |   87 +
 .../reflect/protoregistry/registry.go         |   24 +-
 .../types/descriptorpb/descriptor.pb.go       | 2439 ++++++++++++-----
 .../protobuf/types/known/anypb/any.pb.go      |    3 +-
 vendor/modules.txt                            |   91 +-
 225 files changed, 12589 insertions(+), 5593 deletions(-)
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go
 create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go
 create mode 100644 vendor/github.com/go-logr/logr/context.go
 create mode 100644 vendor/github.com/go-logr/logr/context_noslog.go
 create mode 100644 vendor/github.com/go-logr/logr/context_slog.go
 create mode 100644 vendor/github.com/go-logr/logr/funcr/slogsink.go
 create mode 100644 vendor/github.com/go-logr/logr/sloghandler.go
 create mode 100644 vendor/github.com/go-logr/logr/slogr.go
 create mode 100644 vendor/github.com/go-logr/logr/slogsink.go
 delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE
 delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE
 delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore
 delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile
 delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go
 delete mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go
 rename vendor/github.com/{matttproud/golang_protobuf_extensions/v2/pbutil/doc.go => prometheus/client_golang/prometheus/process_collector_wasip1.go} (63%)
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/problem.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/counter_validations.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/help_validations.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/histogram_validations.go
 create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/units.go
 create mode 100644 vendor/github.com/prometheus/common/model/metadata.go
 create mode 100644 vendor/github.com/urfave/cli/v2/.golangci.yaml
 delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go
 delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
 create mode 100644 vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go
 rename vendor/google.golang.org/protobuf/internal/strs/{strings_unsafe.go => strings_unsafe_go120.go} (96%)
 create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
 create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
 create mode 100644 vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb
 rename vendor/google.golang.org/protobuf/reflect/protoreflect/{value_unsafe.go => value_unsafe_go120.go} (97%)
 create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go

diff --git a/go.mod b/go.mod
index 2eb9a67118..41e7a1ed66 100644
--- a/go.mod
+++ b/go.mod
@@ -3,9 +3,9 @@ module github.com/VictoriaMetrics/VictoriaMetrics
 go 1.20
 
 require (
-	cloud.google.com/go/storage v1.35.1
+	cloud.google.com/go/storage v1.36.0
 	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1
-	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0
+	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1
 	github.com/VictoriaMetrics/easyproto v0.1.4
 	github.com/VictoriaMetrics/fastcache v1.12.2
 
@@ -14,58 +14,58 @@ require (
 	github.com/VictoriaMetrics/fasthttp v1.2.0
 	github.com/VictoriaMetrics/metrics v1.31.0
 	github.com/VictoriaMetrics/metricsql v0.70.0
-	github.com/aws/aws-sdk-go-v2 v1.24.0
-	github.com/aws/aws-sdk-go-v2/config v1.26.1
-	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.7
-	github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5
+	github.com/aws/aws-sdk-go-v2 v1.24.1
+	github.com/aws/aws-sdk-go-v2/config v1.26.4
+	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.12
+	github.com/aws/aws-sdk-go-v2/service/s3 v1.48.0
 	github.com/bmatcuk/doublestar/v4 v4.6.1
 	github.com/cespare/xxhash/v2 v2.2.0
 	github.com/cheggaaa/pb/v3 v3.1.4
 	github.com/gogo/protobuf v1.3.2
 	github.com/golang/snappy v0.0.4
 	github.com/googleapis/gax-go/v2 v2.12.0
-	github.com/influxdata/influxdb v1.11.2
+	github.com/influxdata/influxdb v1.11.4
 	github.com/klauspost/compress v1.17.4
 	github.com/prometheus/prometheus v0.48.1
-	github.com/urfave/cli/v2 v2.26.0
+	github.com/urfave/cli/v2 v2.27.1
 	github.com/valyala/fastjson v1.6.4
 	github.com/valyala/fastrand v1.1.0
 	github.com/valyala/fasttemplate v1.2.2
 	github.com/valyala/gozstd v1.20.1
 	github.com/valyala/histogram v1.2.0
 	github.com/valyala/quicktemplate v1.7.0
-	golang.org/x/net v0.19.0
-	golang.org/x/oauth2 v0.15.0
-	golang.org/x/sys v0.15.0
-	google.golang.org/api v0.154.0
+	golang.org/x/net v0.20.0
+	golang.org/x/oauth2 v0.16.0
+	golang.org/x/sys v0.16.0
+	google.golang.org/api v0.156.0
 	gopkg.in/yaml.v2 v2.4.0
 )
 
 require (
-	cloud.google.com/go v0.111.0 // indirect
+	cloud.google.com/go v0.112.0 // indirect
 	cloud.google.com/go/compute v1.23.3 // indirect
 	cloud.google.com/go/compute/metadata v0.2.3 // indirect
 	cloud.google.com/go/iam v1.1.5 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect
-	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 // indirect
+	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
-	github.com/aws/aws-sdk-go v1.49.1 // indirect
+	github.com/aws/aws-sdk-go v1.49.22 // indirect
 	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 // indirect
-	github.com/aws/aws-sdk-go-v2/credentials v1.16.12 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.16.15 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10 // indirect
 	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.9 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sso v1.18.5 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sts v1.26.5 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.18.6 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 // indirect
 	github.com/aws/smithy-go v1.19.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect
@@ -75,7 +75,7 @@ require (
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/go-kit/log v0.2.1 // indirect
 	github.com/go-logfmt/logfmt v0.6.0 // indirect
-	github.com/go-logr/logr v1.3.0 // indirect
+	github.com/go-logr/logr v1.4.1 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/golang-jwt/jwt/v5 v5.2.0 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
@@ -91,27 +91,26 @@ require (
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/mattn/go-runewidth v0.0.15 // indirect
-	github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
 	github.com/oklog/ulid v1.3.1 // indirect
-	github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect
+	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/prometheus/client_golang v1.17.0 // indirect
+	github.com/prometheus/client_golang v1.18.0 // indirect
 	github.com/prometheus/client_model v0.5.0 // indirect
-	github.com/prometheus/common v0.45.0 // indirect
+	github.com/prometheus/common v0.46.0 // indirect
 	github.com/prometheus/common/sigv4 v0.1.0 // indirect
 	github.com/prometheus/procfs v0.12.0 // indirect
 	github.com/rivo/uniseg v0.4.4 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/stretchr/testify v1.8.4 // indirect
 	github.com/valyala/bytebufferpool v1.0.0 // indirect
-	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
+	github.com/xrash/smetrics v0.0.0-20231213231151-1d8dd44e695e // indirect
 	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/collector/pdata v1.0.0 // indirect
-	go.opentelemetry.io/collector/semconv v0.91.0 // indirect
+	go.opentelemetry.io/collector/pdata v1.0.1 // indirect
+	go.opentelemetry.io/collector/semconv v0.92.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect
 	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
 	go.opentelemetry.io/otel v1.21.0 // indirect
@@ -120,17 +119,17 @@ require (
 	go.uber.org/atomic v1.11.0 // indirect
 	go.uber.org/goleak v1.3.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
-	golang.org/x/crypto v0.16.0 // indirect
-	golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb // indirect
-	golang.org/x/sync v0.5.0 // indirect
+	golang.org/x/crypto v0.18.0 // indirect
+	golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect
+	golang.org/x/sync v0.6.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
 	golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
 	google.golang.org/appengine v1.6.8 // indirect
-	google.golang.org/genproto v0.0.0-20231212172506-995d672761c0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 // indirect
-	google.golang.org/grpc v1.60.0 // indirect
-	google.golang.org/protobuf v1.31.0 // indirect
+	google.golang.org/genproto v0.0.0-20240108191215-35c7eff3a6b1 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 // indirect
+	google.golang.org/grpc v1.60.1 // indirect
+	google.golang.org/protobuf v1.32.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
diff --git a/go.sum b/go.sum
index 7be9d235b0..afff0d5cdb 100644
--- a/go.sum
+++ b/go.sum
@@ -13,8 +13,8 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
 cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
 cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
 cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
-cloud.google.com/go v0.111.0 h1:YHLKNupSD1KqjDbQ3+LVdQ81h/UJbJyZG203cEfnQgM=
-cloud.google.com/go v0.111.0/go.mod h1:0mibmpKP1TyOOFYQY5izo0LnT+ecvOQ0Sg3OdmMiNRU=
+cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM=
+cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -38,8 +38,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
 cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.35.1 h1:B59ahL//eDfx2IIKFBeT5Atm9wnNmj3+8xG/W4WB//w=
-cloud.google.com/go/storage v1.35.1/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
+cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8=
+cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ=
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA=
@@ -50,11 +50,11 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNic
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 h1:UPeCRD+XY7QlaGQte2EVI2iOcWvUYA2XY8w5T/8v0NQ=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork v1.1.0 h1:QM6sE5k2ZT/vI5BEe0r7mqjsUSnhVBFbOsVkEuaEfiA=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v2 v2.2.1 h1:bWh0Z2rOEDfB/ywv/l0iHN1JgyazE6kW/aIA89+CEK0=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0 h1:Ma67P/GGprNwsslzEH6+Kb8nybI8jpDTm4Wmzu2ReK8=
-github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 h1:gggzg0SUMs6SQbEw+3LoSsYf9YMjkupeAnHMX8O9mmY=
-github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 h1:hVeq+yCyUi+MsoO/CU95yqCIcdzra5ovzk8Q2BBpV2M=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 h1:AMf7YbZOZIW5b66cXNHMWWT/zkjhz5+a+k/3x40EO7E=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1/go.mod h1:uwfk06ZBcvL/g4VHNjurPfVln9NMbsk2XIZxJ+hu81k=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
@@ -84,44 +84,44 @@ github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu
 github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
 github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
 github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.49.1 h1:Dsamcd8d/nNb3A+bZ0ucfGl0vGZsW5wlRW0vhoYGoeQ=
-github.com/aws/aws-sdk-go v1.49.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
-github.com/aws/aws-sdk-go-v2 v1.24.0 h1:890+mqQ+hTpNuw0gGP6/4akolQkSToDJgHfQE7AwGuk=
-github.com/aws/aws-sdk-go-v2 v1.24.0/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4=
+github.com/aws/aws-sdk-go v1.49.22 h1:r01+cQJ3cORQI1PJxG8af0jzrZpUOL9L+/3kU2x1geU=
+github.com/aws/aws-sdk-go v1.49.22/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU=
+github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4=
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs=
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4/go.mod h1:usURWEKSNNAcAZuzRn/9ZYPT8aZQkR7xcCtunK/LkJo=
-github.com/aws/aws-sdk-go-v2/config v1.26.1 h1:z6DqMxclFGL3Zfo+4Q0rLnAZ6yVkzCRxhRMsiRQnD1o=
-github.com/aws/aws-sdk-go-v2/config v1.26.1/go.mod h1:ZB+CuKHRbb5v5F0oJtGdhFTelmrxd4iWO1lf0rQwSAg=
-github.com/aws/aws-sdk-go-v2/credentials v1.16.12 h1:v/WgB8NxprNvr5inKIiVVrXPuuTegM+K8nncFkr1usU=
-github.com/aws/aws-sdk-go-v2/credentials v1.16.12/go.mod h1:X21k0FjEJe+/pauud82HYiQbEr9jRKY3kXEIQ4hXeTQ=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 h1:w98BT5w+ao1/r5sUuiH6JkVzjowOKeOJRHERyy1vh58=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10/go.mod h1:K2WGI7vUvkIv1HoNbfBA1bvIZ+9kL3YVmWxeKuLQsiw=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.7 h1:FnLf60PtjXp8ZOzQfhJVsqF0OtYKQZWQfqOLshh8YXg=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.7/go.mod h1:tDVvl8hyU6E9B8TrnNrZQEVkQlB8hjJwcgpPhgtlnNg=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9 h1:v+HbZaCGmOwnTTVS86Fleq0vPzOd7tnJGbFhP0stNLs=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9/go.mod h1:Xjqy+Nyj7VDLBtCMkQYOw1QYfAEZCVLrfI0ezve8wd4=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9 h1:N94sVhRACtXyVcjXxrwK1SKFIJrA9pOJ5yu2eSHnmls=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9/go.mod h1:hqamLz7g1/4EJP+GH5NBhcUMLjW+gKLQabgyz6/7WAU=
+github.com/aws/aws-sdk-go-v2/config v1.26.4 h1:Juj7LhtxNudNUlfX22K5AnLafO+v4eq9PA3VWSCIQs4=
+github.com/aws/aws-sdk-go-v2/config v1.26.4/go.mod h1:tioqQ7wvxMYnTDpoTTLHhV3Zh+z261i/f2oz+ds8eNI=
+github.com/aws/aws-sdk-go-v2/credentials v1.16.15 h1:P0/m1LU08MF2kRzx4P//+7lNjiJod1z4xI2WpWhdpTQ=
+github.com/aws/aws-sdk-go-v2/credentials v1.16.15/go.mod h1:pgtMCf7Dx4GWw5EpHOTc2Sy17LIP0A0N2C9nQ83pQ/0=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.12 h1:0FMZy36RSYvcvVzEf1xbNdebLHZewW40QWP+P8jCMVk=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.12/go.mod h1:+chyahvarkb3HibkNei9IQEM9P5cWD5w2kgXCa3Hh0I=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw=
 github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM=
 github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9 h1:ugD6qzjYtB7zM5PN/ZIeaAIyefPaD82G8+SJopgvUpw=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9/go.mod h1:YD0aYBWCrPENpHolhKw2XDlTIWae2GKXT1T4o6N6hiM=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10 h1:5oE2WzJE56/mVveuDZPJESKlg/00AaS2pY2QZcnxg4M=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10/go.mod h1:FHbKWQtRBYUz4vO5WBWjzMD2by126ny5y/1EoaWoLfI=
 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw=
 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.9 h1:/90OR2XbSYfXucBMJ4U14wrjlfleq/0SB6dZDPncgmo=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.9/go.mod h1:dN/Of9/fNZet7UrQQ6kTDo/VSwKPIq94vjlU16bRARc=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 h1:Nf2sHxjMJR8CSImIVCONRi4g0Su3J+TSTbS7G0pUeMU=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9/go.mod h1:idky4TER38YIjr2cADF1/ugFMKvZV7p//pVeV5LZbF0=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9 h1:iEAeF6YC3l4FzlJPP9H3Ko1TXpdjdqWffxXjp8SY6uk=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9/go.mod h1:kjsXoK23q9Z/tLBrckZLLyvjhZoS+AGrzqzUfEClvMM=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5 h1:Keso8lIOS+IzI2MkPZyK6G0LYcK3My2LQ+T5bxghEAY=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5/go.mod h1:vADO6Jn+Rq4nDtfwNjhgR84qkZwiC6FqCaXdw/kYwjA=
-github.com/aws/aws-sdk-go-v2/service/sso v1.18.5 h1:ldSFWz9tEHAwHNmjx2Cvy1MjP5/L9kNoR0skc6wyOOM=
-github.com/aws/aws-sdk-go-v2/service/sso v1.18.5/go.mod h1:CaFfXLYL376jgbP7VKC96uFcU8Rlavak0UlAwk1Dlhc=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5 h1:2k9KmFawS63euAkY4/ixVNsYYwrwnd5fIvgEKkfZFNM=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5/go.mod h1:W+nd4wWDVkSUIox9bacmkBP5NMFQeTJ/xqNabpzSR38=
-github.com/aws/aws-sdk-go-v2/service/sts v1.26.5 h1:5UYvv8JUvllZsRnfrcMQ+hJ9jNICmcgKPAO1CER25Wg=
-github.com/aws/aws-sdk-go-v2/service/sts v1.26.5/go.mod h1:XX5gh4CB7wAs4KhcF46G6C8a2i7eupU19dcAAE+EydU=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10 h1:L0ai8WICYHozIKK+OtPzVJBugL7culcuM4E4JOpIEm8=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10/go.mod h1:byqfyxJBshFk0fF9YmK0M0ugIO8OWjzH2T3bPG4eGuA=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIGQSGs9w4jKm60F5dmCQ3EEruxdc0MFh+3EY4=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10 h1:KOxnQeWy5sXyS37fdKEvAsGHOr9fa/qvwxfJurR/BzE=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10/go.mod h1:jMx5INQFYFYB3lQD9W0D8Ohgq6Wnl7NYOJ2TQndbulI=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.48.0 h1:PJTdBMsyvra6FtED7JZtDpQrIAflYDHFoZAu/sKYkwU=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.48.0/go.mod h1:4qXHrG1Ne3VGIMZPCB8OjH/pLFO94sKABIusjh0KWPU=
+github.com/aws/aws-sdk-go-v2/service/sso v1.18.6 h1:dGrs+Q/WzhsiUKh82SfTVN66QzyulXuMDTV/G8ZxOac=
+github.com/aws/aws-sdk-go-v2/service/sso v1.18.6/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8=
+github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0=
+github.com/aws/aws-sdk-go-v2/service/sts v1.26.7/go.mod h1:6h2YuIoxaMSCFf5fi1EgZAwdfkGMgDY+DVfa61uLe4U=
 github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM=
 github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -183,8 +183,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
 github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
 github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
-github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ=
@@ -293,8 +293,8 @@ github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY
 github.com/hetznercloud/hcloud-go/v2 v2.4.0 h1:MqlAE+w125PLvJRCpAJmEwrIxoVdUdOyuFUhE/Ukbok=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
-github.com/influxdata/influxdb v1.11.2 h1:qOF3uQN1mDfJNEKwbAgJsqehf8IXgKok2vlGm736oGo=
-github.com/influxdata/influxdb v1.11.2/go.mod h1:eUMkLTE2vQwvSk6KGMrTBLKPaqSuczuelGbggigMPFw=
+github.com/influxdata/influxdb v1.11.4 h1:H3pVW+/tWQ4lkHhZxVQ13Ov1hmhHYaAzz8L5aq3ZNtw=
+github.com/influxdata/influxdb v1.11.4/go.mod h1:VO6X2zlamfmEf+Esc9dR+7UQhdE/krspWNEZPwxCrp0=
 github.com/ionos-cloud/sdk-go/v6 v6.1.9 h1:Iq3VIXzeEbc8EbButuACgfLMiY5TPVWUPNrF+Vsddo4=
 github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
@@ -339,8 +339,6 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
 github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
 github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg=
-github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k=
 github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
@@ -360,8 +358,8 @@ github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
 github.com/ovh/go-ovh v1.4.3 h1:Gs3V823zwTFpzgGLZNI6ILS4rmxZgJwJCz54Er9LwD0=
-github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
-github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -373,8 +371,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
-github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
+github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk=
+github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -385,8 +383,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
 github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM=
-github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY=
+github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y=
+github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ=
 github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
 github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -423,8 +421,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/urfave/cli/v2 v2.26.0 h1:3f3AMg3HpThFNT4I++TKOejZO8yU55t3JnnSr4S4QEI=
-github.com/urfave/cli/v2 v2.26.0/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
+github.com/urfave/cli/v2 v2.27.1 h1:8xSQ6szndafKVRmfyeUMxkNUJQMjL1F2zmsZ+qHpfho=
+github.com/urfave/cli/v2 v2.27.1/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
 github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
 github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
 github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
@@ -442,8 +440,8 @@ github.com/valyala/quicktemplate v1.7.0 h1:LUPTJmlVcb46OOUY3IeD9DojFpAVbsG+5WFTc
 github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8=
 github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
 github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs=
-github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
-github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
+github.com/xrash/smetrics v0.0.0-20231213231151-1d8dd44e695e h1:+SOyEddqYF09QP7vr7CgJ1eti3pY9Fn3LHO1M1r/0sI=
+github.com/xrash/smetrics v0.0.0-20231213231151-1d8dd44e695e/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
 github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -456,10 +454,10 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/collector/pdata v1.0.0 h1:ECP2jnLztewsHmL1opL8BeMtWVc7/oSlKNhfY9jP8ec=
-go.opentelemetry.io/collector/pdata v1.0.0/go.mod h1:TsDFgs4JLNG7t6x9D8kGswXUz4mme+MyNChHx8zSF6k=
-go.opentelemetry.io/collector/semconv v0.91.0 h1:TRd+yDDfKQl+aNtS24wmEbJp1/QE/xAFV9SB5zWGxpE=
-go.opentelemetry.io/collector/semconv v0.91.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw=
+go.opentelemetry.io/collector/pdata v1.0.1 h1:dGX2h7maA6zHbl5D3AsMnF1c3Nn+3EUftbVCLzeyNvA=
+go.opentelemetry.io/collector/pdata v1.0.1/go.mod h1:jutXeu0QOXYY8wcZ/hege+YAnSBP3+jpTqYU1+JTI5Y=
+go.opentelemetry.io/collector/semconv v0.92.0 h1:3+OGPPuVu4rtrz8qGbpbiw7eKKULj4iJaSDTV52HM40=
+go.opentelemetry.io/collector/semconv v0.92.0/go.mod h1:gZ0uzkXsN+J5NpiRcdp9xOhNGQDDui8Y62p15sKrlzo=
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24=
@@ -468,7 +466,7 @@ go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
 go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
 go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
 go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
-go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o=
+go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
 go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
 go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
 go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
@@ -485,8 +483,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
-golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
+golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
+golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -497,8 +495,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb h1:c0vyKkb6yr3KR7jEfJaOSv4lG7xPkbN6r52aJz1d8a8=
-golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
+golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
+golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -555,16 +553,16 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
 golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
-golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
+golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
+golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.15.0 h1:s8pnnxNVzjWyrvYdFUQq5llS1PX2zhPXmccZv99h7uQ=
-golang.org/x/oauth2 v0.15.0/go.mod h1:q48ptWNTY5XWf+JNten23lcvHpLJ0ZSxF5ttTHKVCAM=
+golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
+golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -576,8 +574,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
-golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -616,19 +614,19 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
-golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
+golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
+golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -687,7 +685,7 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc
 golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM=
+golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -710,8 +708,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
 google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
 google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
 google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.154.0 h1:X7QkVKZBskztmpPKWQXgjJRPA2dJYrL6r+sYPRLj050=
-google.golang.org/api v0.154.0/go.mod h1:qhSMkM85hgqiokIYsrRyKxrjfBeIhgl4Z2JmeRkYylc=
+google.golang.org/api v0.156.0 h1:yloYcGbBtVYjLKQe4enCunxvwn3s2w/XPrrhVf6MsvQ=
+google.golang.org/api v0.156.0/go.mod h1:bUSmn4KFO0Q+69zo9CNIDp4Psi6BqM0np0CbzKRSiSY=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -749,12 +747,12 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
 google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20231212172506-995d672761c0 h1:YJ5pD9rF8o9Qtta0Cmy9rdBwkSjrTCT6XTiUQVOtIos=
-google.golang.org/genproto v0.0.0-20231212172506-995d672761c0/go.mod h1:l/k7rMz0vFTBPy+tFSGvXEd3z+BcoG1k7EHbqm+YBsY=
-google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0 h1:s1w3X6gQxwrLEpxnLd/qXTVLgQE2yXwaOaoa6IlY/+o=
-google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0/go.mod h1:CAny0tYF+0/9rmDB9fahA9YLzX3+AEVl1qXbv5hhj6c=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0 h1:/jFB8jK5R3Sq3i/lmeZO0cATSzFfZaJq1J2Euan3XKU=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0/go.mod h1:FUoWkonphQm3RhTS+kOEhF8h0iDpm4tdXolVCeZ9KKA=
+google.golang.org/genproto v0.0.0-20240108191215-35c7eff3a6b1 h1:/IWabOtPziuXTEtI1KYCpM6Ss7vaAkeMxk+uXV/xvZs=
+google.golang.org/genproto v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:+Rvu7ElI+aLzyDQhpHMFMMltsD6m7nqpuWDd2CwJw3k=
+google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1 h1:OPXtXn7fNMaXwO3JvOmF1QyTc00jsSFFz1vXXBOdCDo=
+google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:B5xPO//w8qmBDjGReYLpR6UJPnkldGkCSMoH/2vxJeg=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 h1:gphdwh0npgs8elJ4T6J+DQJHPVF7RsuJHCfwztUb4J4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -768,8 +766,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
 google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
 google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.60.0 h1:6FQAR0kM31P6MRdeluor2w2gPaS4SVNrD/DNTxrQ15k=
-google.golang.org/grpc v1.60.0/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
+google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
+google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -782,8 +780,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
-google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
+google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
index 46c4094d3b..ae8a1fc146 100644
--- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
+++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
@@ -29,6 +29,26 @@
     "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
+  "cloud.google.com/go/ai/generativelanguage/apiv1": {
+    "api_shortname": "generativelanguage",
+    "distribution_name": "cloud.google.com/go/ai/generativelanguage/apiv1",
+    "description": "Generative Language API",
+    "language": "go",
+    "client_library_type": "generated",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/ai/latest/generativelanguage/apiv1",
+    "release_level": "preview",
+    "library_type": "GAPIC_AUTO"
+  },
+  "cloud.google.com/go/ai/generativelanguage/apiv1beta": {
+    "api_shortname": "generativelanguage",
+    "distribution_name": "cloud.google.com/go/ai/generativelanguage/apiv1beta",
+    "description": "Generative Language API",
+    "language": "go",
+    "client_library_type": "generated",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/ai/latest/generativelanguage/apiv1beta",
+    "release_level": "preview",
+    "library_type": "GAPIC_AUTO"
+  },
   "cloud.google.com/go/ai/generativelanguage/apiv1beta2": {
     "api_shortname": "generativelanguage",
     "distribution_name": "cloud.google.com/go/ai/generativelanguage/apiv1beta2",
@@ -179,6 +199,16 @@
     "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
+  "cloud.google.com/go/apps/meet/apiv2beta": {
+    "api_shortname": "meet",
+    "distribution_name": "cloud.google.com/go/apps/meet/apiv2beta",
+    "description": "Google Meet API",
+    "language": "go",
+    "client_library_type": "generated",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apps/latest/meet/apiv2beta",
+    "release_level": "preview",
+    "library_type": "GAPIC_AUTO"
+  },
   "cloud.google.com/go/area120/tables/apiv1alpha1": {
     "api_shortname": "area120tables",
     "distribution_name": "cloud.google.com/go/area120/tables/apiv1alpha1",
@@ -629,6 +659,16 @@
     "release_level": "preview",
     "library_type": "GAPIC_AUTO"
   },
+  "cloud.google.com/go/cloudquotas/apiv1": {
+    "api_shortname": "cloudquotas",
+    "distribution_name": "cloud.google.com/go/cloudquotas/apiv1",
+    "description": "Cloud Quotas API",
+    "language": "go",
+    "client_library_type": "generated",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudquotas/latest/apiv1",
+    "release_level": "preview",
+    "library_type": "GAPIC_AUTO"
+  },
   "cloud.google.com/go/cloudtasks/apiv2": {
     "api_shortname": "cloudtasks",
     "distribution_name": "cloud.google.com/go/cloudtasks/apiv2",
@@ -969,6 +1009,16 @@
     "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
+  "cloud.google.com/go/discoveryengine/apiv1alpha": {
+    "api_shortname": "discoveryengine",
+    "distribution_name": "cloud.google.com/go/discoveryengine/apiv1alpha",
+    "description": "Discovery Engine API",
+    "language": "go",
+    "client_library_type": "generated",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/discoveryengine/latest/apiv1alpha",
+    "release_level": "preview",
+    "library_type": "GAPIC_AUTO"
+  },
   "cloud.google.com/go/discoveryengine/apiv1beta": {
     "api_shortname": "discoveryengine",
     "distribution_name": "cloud.google.com/go/discoveryengine/apiv1beta",
@@ -2099,6 +2149,16 @@
     "release_level": "preview",
     "library_type": "GAPIC_AUTO"
   },
+  "cloud.google.com/go/securitycentermanagement/apiv1": {
+    "api_shortname": "securitycentermanagement",
+    "distribution_name": "cloud.google.com/go/securitycentermanagement/apiv1",
+    "description": "Security Center Management API",
+    "language": "go",
+    "client_library_type": "generated",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycentermanagement/latest/apiv1",
+    "release_level": "preview",
+    "library_type": "GAPIC_AUTO"
+  },
   "cloud.google.com/go/servicecontrol/apiv1": {
     "api_shortname": "servicecontrol",
     "distribution_name": "cloud.google.com/go/servicecontrol/apiv1",
@@ -2159,6 +2219,16 @@
     "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
+  "cloud.google.com/go/shopping/css/apiv1": {
+    "api_shortname": "css",
+    "distribution_name": "cloud.google.com/go/shopping/css/apiv1",
+    "description": "CSS API",
+    "language": "go",
+    "client_library_type": "generated",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/css/apiv1",
+    "release_level": "preview",
+    "library_type": "GAPIC_AUTO"
+  },
   "cloud.google.com/go/shopping/merchant/inventories/apiv1beta": {
     "api_shortname": "merchantapi",
     "distribution_name": "cloud.google.com/go/shopping/merchant/inventories/apiv1beta",
@@ -2209,6 +2279,16 @@
     "release_level": "stable",
     "library_type": "GAPIC_AUTO"
   },
+  "cloud.google.com/go/spanner/executor/apiv1": {
+    "api_shortname": "spanner-cloud-executor",
+    "distribution_name": "cloud.google.com/go/spanner/executor/apiv1",
+    "description": "Cloud Spanner Executor test API",
+    "language": "go",
+    "client_library_type": "generated",
+    "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/spanner/latest/executor/apiv1",
+    "release_level": "preview",
+    "library_type": "GAPIC_AUTO"
+  },
   "cloud.google.com/go/speech/apiv1": {
     "api_shortname": "speech",
     "distribution_name": "cloud.google.com/go/speech/apiv1",
diff --git a/vendor/cloud.google.com/go/internal/trace/trace.go b/vendor/cloud.google.com/go/internal/trace/trace.go
index f6b88253b4..eabed000f3 100644
--- a/vendor/cloud.google.com/go/internal/trace/trace.go
+++ b/vendor/cloud.google.com/go/internal/trace/trace.go
@@ -32,16 +32,33 @@ import (
 )
 
 const (
-	telemetryPlatformTracingOpenCensus    = "opencensus"
-	telemetryPlatformTracingOpenTelemetry = "opentelemetry"
-	telemetryPlatformTracingVar           = "GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING"
+	// TelemetryPlatformTracingOpenCensus is the value to which the environment
+	// variable GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING should be
+	// set to enable OpenCensus tracing.
+	TelemetryPlatformTracingOpenCensus = "opencensus"
+	// TelemetryPlatformTracingOpenTelemetry is the value to which the environment
+	// variable GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING should be
+	// set to enable OpenTelemetry tracing.
+	TelemetryPlatformTracingOpenTelemetry = "opentelemetry"
+	// TelemetryPlatformTracingVar is the name of the environment
+	// variable that can be set to change the default tracing from OpenCensus
+	// to OpenTelemetry.
+	TelemetryPlatformTracingVar = "GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING"
+	// OpenTelemetryTracerName is the name given to the OpenTelemetry Tracer
+	// when it is obtained from the OpenTelemetry TracerProvider.
+	OpenTelemetryTracerName = "cloud.google.com/go"
 )
 
 var (
-	// TODO(chrisdsmith): Should the name of the OpenTelemetry tracer be public and mutable?
-	openTelemetryTracerName     string = "cloud.google.com/go"
-	openTelemetryTracingEnabled bool   = strings.EqualFold(strings.TrimSpace(
-		os.Getenv(telemetryPlatformTracingVar)), telemetryPlatformTracingOpenTelemetry)
+	// OpenTelemetryTracingEnabled is true if the environment variable
+	// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the
+	// case-insensitive value "opentelemetry".
+	//
+	// Do not access directly. Use instead IsOpenTelemetryTracingEnabled or
+	// IsOpenCensusTracingEnabled. Intended for use only in unit tests. Restore
+	// original value after each test.
+	OpenTelemetryTracingEnabled bool = strings.EqualFold(strings.TrimSpace(
+		os.Getenv(TelemetryPlatformTracingVar)), TelemetryPlatformTracingOpenTelemetry)
 )
 
 // IsOpenCensusTracingEnabled returns true if the environment variable
@@ -55,7 +72,7 @@ func IsOpenCensusTracingEnabled() bool {
 // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the
 // case-insensitive value "opentelemetry".
 func IsOpenTelemetryTracingEnabled() bool {
-	return openTelemetryTracingEnabled
+	return OpenTelemetryTracingEnabled
 }
 
 // StartSpan adds a span to the trace with the given name. If IsOpenCensusTracingEnabled
@@ -63,12 +80,12 @@ func IsOpenTelemetryTracingEnabled() bool {
 // returns true, the span will be an OpenTelemetry span. Set the environment variable
 // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
 // value "opentelemetry" before loading the package to use OpenTelemetry tracing.
-// The default will remain OpenCensus until [TBD], at which time the default will
+// The default will remain OpenCensus until May 29, 2024, at which time the default will
 // switch to "opentelemetry" and explicitly setting the environment variable to
 // "opencensus" will be required to continue using OpenCensus tracing.
 func StartSpan(ctx context.Context, name string) context.Context {
 	if IsOpenTelemetryTracingEnabled() {
-		ctx, _ = otel.GetTracerProvider().Tracer(openTelemetryTracerName).Start(ctx, name)
+		ctx, _ = otel.GetTracerProvider().Tracer(OpenTelemetryTracerName).Start(ctx, name)
 	} else {
 		ctx, _ = trace.StartSpan(ctx, name)
 	}
@@ -80,7 +97,7 @@ func StartSpan(ctx context.Context, name string) context.Context {
 // returns true, the span will be an OpenTelemetry span. Set the environment variable
 // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
 // value "opentelemetry" before loading the package to use OpenTelemetry tracing.
-// The default will remain OpenCensus until [TBD], at which time the default will
+// The default will remain OpenCensus until May 29, 2024, at which time the default will
 // switch to "opentelemetry" and explicitly setting the environment variable to
 // "opencensus" will be required to continue using OpenCensus tracing.
 func EndSpan(ctx context.Context, err error) {
@@ -166,7 +183,7 @@ func httpStatusCodeToOCCode(httpStatusCode int) int32 {
 // span must be an OpenTelemetry span. Set the environment variable
 // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
 // value "opentelemetry" before loading the package to use OpenTelemetry tracing.
-// The default will remain OpenCensus until [TBD], at which time the default will
+// The default will remain OpenCensus until May 29, 2024, at which time the default will
 // switch to "opentelemetry" and explicitly setting the environment variable to
 // "opencensus" will be required to continue using OpenCensus tracing.
 func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) {
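
The newly exported constants above also spell out the mechanics of the switch: GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is read once, when the package is initialized, and decides whether StartSpan produces OpenCensus or OpenTelemetry spans. A minimal, illustrative sketch of what that means for a caller of the storage client, with placeholder bucket name:

package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	// The switch is evaluated at package-init time, so it must already be in
	// the process environment, e.g.
	//
	//	GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING=opentelemetry ./app
	//
	// Calling os.Setenv from main is too late to change it.
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// With the variable set, the span wrapping this call comes from the
	// OpenTelemetry tracer named "cloud.google.com/go"; otherwise it stays an
	// OpenCensus span until the default flips on May 29, 2024.
	attrs, err := client.Bucket("example-bucket").Attrs(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("bucket location: %s", attrs.Location)
}
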
diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md
index 30ee040f76..8b3fa6fc48 100644
--- a/vendor/cloud.google.com/go/storage/CHANGES.md
+++ b/vendor/cloud.google.com/go/storage/CHANGES.md
@@ -1,6 +1,19 @@
 # Changes
 
 
+## [1.36.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.35.1...storage/v1.36.0) (2023-12-14)
+
+
+### Features
+
+* **storage:** Add object retention feature ([#9072](https://github.com/googleapis/google-cloud-go/issues/9072)) ([16ecfd1](https://github.com/googleapis/google-cloud-go/commit/16ecfd150ff1982f03d207a80a82e934d1013874))
+
+
+### Bug Fixes
+
+* **storage:** Do not inhibit the dead code elimination. ([#8543](https://github.com/googleapis/google-cloud-go/issues/8543)) ([ca2493f](https://github.com/googleapis/google-cloud-go/commit/ca2493f43c299bbaed5f7e5b70f66cc763ff9802))
+* **storage:** Set flush and get_state to false on the last write in gRPC ([#9013](https://github.com/googleapis/google-cloud-go/issues/9013)) ([c1e9fe5](https://github.com/googleapis/google-cloud-go/commit/c1e9fe5f4166a71e55814ccf126926ec0e0e7945))
+
 ## [1.35.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.35.0...storage/v1.35.1) (2023-11-09)
 
 
diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go
index 3818c4498c..1059d4e8b7 100644
--- a/vendor/cloud.google.com/go/storage/bucket.go
+++ b/vendor/cloud.google.com/go/storage/bucket.go
@@ -41,13 +41,14 @@ import (
 // BucketHandle provides operations on a Google Cloud Storage bucket.
 // Use Client.Bucket to get a handle.
 type BucketHandle struct {
-	c                *Client
-	name             string
-	acl              ACLHandle
-	defaultObjectACL ACLHandle
-	conds            *BucketConditions
-	userProject      string // project for Requester Pays buckets
-	retry            *retryConfig
+	c                     *Client
+	name                  string
+	acl                   ACLHandle
+	defaultObjectACL      ACLHandle
+	conds                 *BucketConditions
+	userProject           string // project for Requester Pays buckets
+	retry                 *retryConfig
+	enableObjectRetention *bool
 }
 
 // Bucket returns a BucketHandle, which provides operations on the named bucket.
@@ -85,7 +86,8 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck
 	defer func() { trace.EndSpan(ctx, err) }()
 
 	o := makeStorageOpts(true, b.retry, b.userProject)
-	if _, err := b.c.tc.CreateBucket(ctx, projectID, b.name, attrs, o...); err != nil {
+
+	if _, err := b.c.tc.CreateBucket(ctx, projectID, b.name, attrs, b.enableObjectRetention, o...); err != nil {
 		return err
 	}
 	return nil
@@ -462,6 +464,15 @@ type BucketAttrs struct {
 	// allows for the automatic selection of the best storage class
 	// based on object access patterns.
 	Autoclass *Autoclass
+
+	// ObjectRetentionMode reports whether individual objects in the bucket can
+	// be configured with a retention policy. An empty value means that object
+	// retention is disabled.
+	// This field is read-only. Object retention can be enabled only by creating
+	// a bucket with SetObjectRetention set to true on the BucketHandle. It
+	// cannot be modified once the bucket is created.
+	// ObjectRetention cannot be configured or reported through the gRPC API.
+	ObjectRetentionMode string
 }
 
 // BucketPolicyOnly is an alias for UniformBucketLevelAccess.
@@ -757,6 +768,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
 	if err != nil {
 		return nil, err
 	}
+
 	return &BucketAttrs{
 		Name:                     b.Name,
 		Location:                 b.Location,
@@ -771,6 +783,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
 		RequesterPays:            b.Billing != nil && b.Billing.RequesterPays,
 		Lifecycle:                toLifecycle(b.Lifecycle),
 		RetentionPolicy:          rp,
+		ObjectRetentionMode:      toBucketObjectRetention(b.ObjectRetention),
 		CORS:                     toCORS(b.Cors),
 		Encryption:               toBucketEncryption(b.Encryption),
 		Logging:                  toBucketLogging(b.Logging),
@@ -1348,6 +1361,17 @@ func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error {
 	return b.c.tc.LockBucketRetentionPolicy(ctx, b.name, b.conds, o...)
 }
 
+// SetObjectRetention returns a new BucketHandle that will enable object retention
+// on bucket creation. To enable object retention, you must use the returned
+// handle to create the bucket. This has no effect on an already existing bucket.
+// ObjectRetention is not enabled by default.
+// ObjectRetention cannot be configured through the gRPC API.
+func (b *BucketHandle) SetObjectRetention(enable bool) *BucketHandle {
+	b2 := *b
+	b2.enableObjectRetention = &enable
+	return &b2
+}
+
 // applyBucketConds modifies the provided call using the conditions in conds.
 // call is something that quacks like a *raw.WhateverCall.
 func applyBucketConds(method string, conds *BucketConditions, call interface{}) error {
@@ -1360,11 +1384,11 @@ func applyBucketConds(method string, conds *BucketConditions, call interface{})
 	cval := reflect.ValueOf(call)
 	switch {
 	case conds.MetagenerationMatch != 0:
-		if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) {
+		if !setIfMetagenerationMatch(cval, conds.MetagenerationMatch) {
 			return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
 		}
 	case conds.MetagenerationNotMatch != 0:
-		if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) {
+		if !setIfMetagenerationNotMatch(cval, conds.MetagenerationNotMatch) {
 			return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
 		}
 	}
@@ -1447,6 +1471,13 @@ func toRetentionPolicyFromProto(rp *storagepb.Bucket_RetentionPolicy) *Retention
 	}
 }
 
+func toBucketObjectRetention(or *raw.BucketObjectRetention) string {
+	if or == nil {
+		return ""
+	}
+	return or.Mode
+}
+
 func toRawCORS(c []CORS) []*raw.BucketCors {
 	var out []*raw.BucketCors
 	for _, v := range c {
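
SetObjectRetention only has an effect when the returned handle is used to create the bucket, and ObjectRetentionMode is the read-only echo of that choice over the HTTP API. A minimal, illustrative sketch with placeholder project and bucket names:

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Object retention must be requested before the bucket exists; calling
	// SetObjectRetention on a handle for an existing bucket changes nothing.
	bkt := client.Bucket("example-bucket").SetObjectRetention(true)
	if err := bkt.Create(ctx, "example-project", nil); err != nil {
		log.Fatal(err)
	}

	attrs, err := bkt.Attrs(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// Read-only; non-empty once object retention is enabled on the bucket.
	fmt.Println(attrs.ObjectRetentionMode)
}
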
diff --git a/vendor/cloud.google.com/go/storage/client.go b/vendor/cloud.google.com/go/storage/client.go
index 3bed9b64c2..4906b1d1f7 100644
--- a/vendor/cloud.google.com/go/storage/client.go
+++ b/vendor/cloud.google.com/go/storage/client.go
@@ -44,7 +44,7 @@ type storageClient interface {
 	// Top-level methods.
 
 	GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error)
-	CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error)
+	CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, enableObjectRetention *bool, opts ...storageOption) (*BucketAttrs, error)
 	ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator
 	Close() error
 
@@ -60,7 +60,7 @@ type storageClient interface {
 
 	DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error
 	GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error)
-	UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error)
+	UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error)
 
 	// Default Object ACL methods.
 
@@ -291,6 +291,15 @@ type newRangeReaderParams struct {
 	readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently.
 }
 
+type updateObjectParams struct {
+	bucket, object    string
+	uattrs            *ObjectAttrsToUpdate
+	gen               int64
+	encryptionKey     []byte
+	conds             *Conditions
+	overrideRetention *bool
+}
+
 type composeObjectRequest struct {
 	dstBucket     string
 	dstObject     destinationObject
diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go
index 99dfba4677..a51cf9c086 100644
--- a/vendor/cloud.google.com/go/storage/grpc_client.go
+++ b/vendor/cloud.google.com/go/storage/grpc_client.go
@@ -152,7 +152,12 @@ func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project strin
 	return resp.EmailAddress, err
 }
 
-func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) {
+func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, enableObjectRetention *bool, opts ...storageOption) (*BucketAttrs, error) {
+	if enableObjectRetention != nil {
+		// TO-DO: implement ObjectRetention once available - see b/308194853
+		return nil, status.Errorf(codes.Unimplemented, "storage: object retention is not supported in gRPC")
+	}
+
 	s := callSettings(c.settings, opts...)
 	b := attrs.toProtoBucket()
 	b.Project = toProjectResource(project)
@@ -507,25 +512,30 @@ func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string
 	return attrs, err
 }
 
-func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) {
+func (c *grpcStorageClient) UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
+	uattrs := params.uattrs
+	if params.overrideRetention != nil || uattrs.Retention != nil {
+		// TO-DO: implement ObjectRetention once available - see b/308194853
+		return nil, status.Errorf(codes.Unimplemented, "storage: object retention is not supported in gRPC")
+	}
 	s := callSettings(c.settings, opts...)
-	o := uattrs.toProtoObject(bucketResourceName(globalProjectAlias, bucket), object)
+	o := uattrs.toProtoObject(bucketResourceName(globalProjectAlias, params.bucket), params.object)
 	// For Update, generation is passed via the object message rather than a field on the request.
-	if gen >= 0 {
-		o.Generation = gen
+	if params.gen >= 0 {
+		o.Generation = params.gen
 	}
 	req := &storagepb.UpdateObjectRequest{
 		Object:        o,
 		PredefinedAcl: uattrs.PredefinedACL,
 	}
-	if err := applyCondsProto("grpcStorageClient.UpdateObject", defaultGen, conds, req); err != nil {
+	if err := applyCondsProto("grpcStorageClient.UpdateObject", defaultGen, params.conds, req); err != nil {
 		return nil, err
 	}
 	if s.userProject != "" {
 		ctx = setUserProjectMetadata(ctx, s.userProject)
 	}
-	if encryptionKey != nil {
-		req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(encryptionKey)
+	if params.encryptionKey != nil {
+		req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(params.encryptionKey)
 	}
 
 	fieldMask := &fieldmaskpb.FieldMask{Paths: nil}
@@ -739,7 +749,8 @@ func (c *grpcStorageClient) DeleteObjectACL(ctx context.Context, bucket, object
 	}
 	uattrs := &ObjectAttrsToUpdate{ACL: acl}
 	// Call UpdateObject with the specified metageneration.
-	if _, err = c.UpdateObject(ctx, bucket, object, uattrs, defaultGen, nil, &Conditions{MetagenerationMatch: attrs.Metageneration}, opts...); err != nil {
+	params := &updateObjectParams{bucket: bucket, object: object, uattrs: uattrs, gen: defaultGen, conds: &Conditions{MetagenerationMatch: attrs.Metageneration}}
+	if _, err = c.UpdateObject(ctx, params, opts...); err != nil {
 		return err
 	}
 	return nil
@@ -769,7 +780,8 @@ func (c *grpcStorageClient) UpdateObjectACL(ctx context.Context, bucket, object
 	acl = append(attrs.ACL, aclRule)
 	uattrs := &ObjectAttrsToUpdate{ACL: acl}
 	// Call UpdateObject with the specified metageneration.
-	if _, err = c.UpdateObject(ctx, bucket, object, uattrs, defaultGen, nil, &Conditions{MetagenerationMatch: attrs.Metageneration}, opts...); err != nil {
+	params := &updateObjectParams{bucket: bucket, object: object, uattrs: uattrs, gen: defaultGen, conds: &Conditions{MetagenerationMatch: attrs.Metageneration}}
+	if _, err = c.UpdateObject(ctx, params, opts...); err != nil {
 		return err
 	}
 	return nil
@@ -1049,6 +1061,13 @@ func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storage
 				return
 			}
 
+			if params.attrs.Retention != nil {
+				// TO-DO: remove once ObjectRetention is available - see b/308194853
+				err = status.Errorf(codes.Unimplemented, "storage: object retention is not supported in gRPC")
+				errorf(err)
+				pr.CloseWithError(err)
+				return
+			}
 			// The chunk buffer is full, but there is no end in sight. This
 			// means that either:
 			// 1. A resumable upload will need to be used to send
@@ -1629,8 +1648,8 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
 			},
 			WriteOffset: writeOffset,
 			FinishWrite: lastWriteOfEntireObject,
-			Flush:       remainingDataFitsInSingleReq,
-			StateLookup: remainingDataFitsInSingleReq,
+			Flush:       remainingDataFitsInSingleReq && !lastWriteOfEntireObject,
+			StateLookup: remainingDataFitsInSingleReq && !lastWriteOfEntireObject,
 		}
 
 		// Open a new stream if necessary and set the first_message field on
@@ -1723,32 +1742,33 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
 			return nil, writeOffset, nil
 		}
 
-		// Done sending data (remainingDataFitsInSingleReq should == true if we
-		// reach this code). Receive from the stream to confirm the persisted data.
-		resp, err := w.stream.Recv()
+		// Done sending the data in the buffer (remainingDataFitsInSingleReq
+		// should == true if we reach this code).
+		// If we are done sending the whole object, close the stream and get the final
+		// object. Otherwise, receive from the stream to confirm the persisted data.
+		if !lastWriteOfEntireObject {
+			resp, err := w.stream.Recv()
 
-		// Retriable errors mean we should start over and attempt to
-		// resend the entire buffer via a new stream.
-		// If not retriable, falling through will return the error received
-		// from closing the stream.
-		if shouldRetry(err) {
-			writeOffset, err = w.determineOffset(start)
+			// Retriable errors mean we should start over and attempt to
+			// resend the entire buffer via a new stream.
+			// If not retriable, falling through will return the error received
+			// from closing the stream.
+			if shouldRetry(err) {
+				writeOffset, err = w.determineOffset(start)
+				if err != nil {
+					return nil, 0, err
+				}
+				sent = int(writeOffset) - int(start)
+
+				// Drop the stream reference as a new one will need to be created.
+				w.stream = nil
+
+				continue
+			}
 			if err != nil {
 				return nil, 0, err
 			}
-			sent = int(writeOffset) - int(start)
 
-			// Drop the stream reference as a new one will need to be created.
-			w.stream = nil
-
-			continue
-		}
-		if err != nil {
-			return nil, 0, err
-		}
-
-		// Confirm the persisted data if we have not finished uploading the object.
-		if !lastWriteOfEntireObject {
 			if resp.GetPersistedSize() != writeOffset {
 				// Retry if not all bytes were persisted.
 				writeOffset = resp.GetPersistedSize()
diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go
index b62f009da1..0e157e4ba9 100644
--- a/vendor/cloud.google.com/go/storage/http_client.go
+++ b/vendor/cloud.google.com/go/storage/http_client.go
@@ -159,7 +159,7 @@ func (c *httpStorageClient) GetServiceAccount(ctx context.Context, project strin
 	return res.EmailAddress, nil
 }
 
-func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) {
+func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, enableObjectRetention *bool, opts ...storageOption) (*BucketAttrs, error) {
 	s := callSettings(c.settings, opts...)
 	var bkt *raw.Bucket
 	if attrs != nil {
@@ -181,6 +181,9 @@ func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket st
 	if attrs != nil && attrs.PredefinedDefaultObjectACL != "" {
 		req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL)
 	}
+	if enableObjectRetention != nil {
+		req.EnableObjectRetention(*enableObjectRetention)
+	}
 	var battrs *BucketAttrs
 	err := run(ctx, func(ctx context.Context) error {
 		b, err := req.Context(ctx).Do()
@@ -431,7 +434,8 @@ func (c *httpStorageClient) GetObject(ctx context.Context, bucket, object string
 	return newObject(obj), nil
 }
 
-func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) {
+func (c *httpStorageClient) UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
+	uattrs := params.uattrs
 	s := callSettings(c.settings, opts...)
 
 	var attrs ObjectAttrs
@@ -496,11 +500,21 @@ func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object str
 		// we don't append to nullFields here.
 		forceSendFields = append(forceSendFields, "Acl")
 	}
-	rawObj := attrs.toRawObject(bucket)
+	if uattrs.Retention != nil {
+		// For ObjectRetention it's an error to send empty fields.
+		// Instead we send a null as the user's intention is to remove.
+		if uattrs.Retention.Mode == "" && uattrs.Retention.RetainUntil.IsZero() {
+			nullFields = append(nullFields, "Retention")
+		} else {
+			attrs.Retention = uattrs.Retention
+			forceSendFields = append(forceSendFields, "Retention")
+		}
+	}
+	rawObj := attrs.toRawObject(params.bucket)
 	rawObj.ForceSendFields = forceSendFields
 	rawObj.NullFields = nullFields
-	call := c.raw.Objects.Patch(bucket, object, rawObj).Projection("full")
-	if err := applyConds("Update", gen, conds, call); err != nil {
+	call := c.raw.Objects.Patch(params.bucket, params.object, rawObj).Projection("full")
+	if err := applyConds("Update", params.gen, params.conds, call); err != nil {
 		return nil, err
 	}
 	if s.userProject != "" {
@@ -509,9 +523,14 @@ func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object str
 	if uattrs.PredefinedACL != "" {
 		call.PredefinedAcl(uattrs.PredefinedACL)
 	}
-	if err := setEncryptionHeaders(call.Header(), encryptionKey, false); err != nil {
+	if err := setEncryptionHeaders(call.Header(), params.encryptionKey, false); err != nil {
 		return nil, err
 	}
+
+	if params.overrideRetention != nil {
+		call.OverrideUnlockedRetention(*params.overrideRetention)
+	}
+
 	var obj *raw.Object
 	var err error
 	err = run(ctx, func(ctx context.Context) error { obj, err = call.Context(ctx).Do(); return err }, s.retry, s.idempotent)
diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go
index eca9b294aa..2d5cf890e0 100644
--- a/vendor/cloud.google.com/go/storage/internal/version.go
+++ b/vendor/cloud.google.com/go/storage/internal/version.go
@@ -15,4 +15,4 @@
 package internal
 
 // Version is the current tagged release of the library.
-const Version = "1.35.1"
+const Version = "1.36.0"
diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go
index a16e512f5e..78ecbf0e89 100644
--- a/vendor/cloud.google.com/go/storage/storage.go
+++ b/vendor/cloud.google.com/go/storage/storage.go
@@ -879,16 +879,17 @@ func signedURLV2(bucket, name string, opts *SignedURLOptions) (string, error) {
 // ObjectHandle provides operations on an object in a Google Cloud Storage bucket.
 // Use BucketHandle.Object to get a handle.
 type ObjectHandle struct {
-	c              *Client
-	bucket         string
-	object         string
-	acl            ACLHandle
-	gen            int64 // a negative value indicates latest
-	conds          *Conditions
-	encryptionKey  []byte // AES-256 key
-	userProject    string // for requester-pays buckets
-	readCompressed bool   // Accept-Encoding: gzip
-	retry          *retryConfig
+	c                 *Client
+	bucket            string
+	object            string
+	acl               ACLHandle
+	gen               int64 // a negative value indicates latest
+	conds             *Conditions
+	encryptionKey     []byte // AES-256 key
+	userProject       string // for requester-pays buckets
+	readCompressed    bool   // Accept-Encoding: gzip
+	retry             *retryConfig
+	overrideRetention *bool
 }
 
 // ACL provides access to the object's access control list.
@@ -958,7 +959,15 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
 	}
 	isIdempotent := o.conds != nil && o.conds.MetagenerationMatch != 0
 	opts := makeStorageOpts(isIdempotent, o.retry, o.userProject)
-	return o.c.tc.UpdateObject(ctx, o.bucket, o.object, &uattrs, o.gen, o.encryptionKey, o.conds, opts...)
+	return o.c.tc.UpdateObject(ctx,
+		&updateObjectParams{
+			bucket:            o.bucket,
+			object:            o.object,
+			uattrs:            &uattrs,
+			gen:               o.gen,
+			encryptionKey:     o.encryptionKey,
+			conds:             o.conds,
+			overrideRetention: o.overrideRetention}, opts...)
 }
 
 // BucketName returns the name of the bucket.
@@ -973,16 +982,19 @@ func (o *ObjectHandle) ObjectName() string {
 
 // ObjectAttrsToUpdate is used to update the attributes of an object.
 // Only fields set to non-nil values will be updated.
-// For all fields except CustomTime, set the field to its zero value to delete
-// it. CustomTime cannot be deleted or changed to an earlier time once set.
+// For all fields except CustomTime and Retention, set the field to its zero
+// value to delete it. CustomTime cannot be deleted or changed to an earlier
+// time once set. Retention can be deleted (only if the Mode is Unlocked) by
+// setting it to an empty value (not nil).
 //
-// For example, to change ContentType and delete ContentEncoding and
-// Metadata, use
+// For example, to change ContentType and delete ContentEncoding, Metadata and
+// Retention, use:
 //
 //	ObjectAttrsToUpdate{
 //	    ContentType: "text/html",
 //	    ContentEncoding: "",
 //	    Metadata: map[string]string{},
+//	    Retention: &ObjectRetention{},
 //	}
 type ObjectAttrsToUpdate struct {
 	EventBasedHold     optional.Bool
@@ -999,6 +1011,12 @@ type ObjectAttrsToUpdate struct {
 	// If not empty, applies a predefined set of access controls. ACL must be nil.
 	// See https://cloud.google.com/storage/docs/json_api/v1/objects/patch.
 	PredefinedACL string
+
+	// Retention contains the retention configuration for this object.
+	// Operations other than setting the retention for the first time or
+	// extending the RetainUntil time on the object retention must be done
+	// on an ObjectHandle with OverrideUnlockedRetention set to true.
+	Retention *ObjectRetention
 }
 
 // Delete deletes the single specified object.
@@ -1020,6 +1038,17 @@ func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle {
 	return &o2
 }
 
+// OverrideUnlockedRetention provides an option for overriding an Unlocked
+// Retention policy. This must be set to true in order to change a policy
+// from Unlocked to Locked, to set it to null, or to reduce its
+// RetainUntil attribute. It is not required for setting the ObjectRetention for
+// the first time nor for extending the RetainUntil time.
+func (o *ObjectHandle) OverrideUnlockedRetention(override bool) *ObjectHandle {
+	o2 := *o
+	o2.overrideRetention = &override
+	return &o2
+}
+
 // NewWriter returns a storage Writer that writes to the GCS object
 // associated with this ObjectHandle.
 //
@@ -1109,6 +1138,7 @@ func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
 		Acl:                     toRawObjectACL(o.ACL),
 		Metadata:                o.Metadata,
 		CustomTime:              ct,
+		Retention:               o.Retention.toRawObjectRetention(),
 	}
 }
 
@@ -1344,6 +1374,42 @@ type ObjectAttrs struct {
 	// For non-composite objects, the value will be zero.
 	// This field is read-only.
 	ComponentCount int64
+
+	// Retention contains the retention configuration for this object.
+	// ObjectRetention cannot be configured or reported through the gRPC API.
+	Retention *ObjectRetention
+}
+
+// ObjectRetention contains the retention configuration for this object.
+type ObjectRetention struct {
+	// Mode is the retention policy's mode on this object. Valid values are
+	// "Locked" and "Unlocked".
+	// Locked retention policies cannot be changed. Unlocked policies require an
+	// override to change.
+	Mode string
+
+	// RetainUntil is the time this object will be retained until.
+	RetainUntil time.Time
+}
+
+func (r *ObjectRetention) toRawObjectRetention() *raw.ObjectRetention {
+	if r == nil {
+		return nil
+	}
+	return &raw.ObjectRetention{
+		Mode:            r.Mode,
+		RetainUntilTime: r.RetainUntil.Format(time.RFC3339),
+	}
+}
+
+func toObjectRetention(r *raw.ObjectRetention) *ObjectRetention {
+	if r == nil {
+		return nil
+	}
+	return &ObjectRetention{
+		Mode:        r.Mode,
+		RetainUntil: convertTime(r.RetainUntilTime),
+	}
 }
 
 // convertTime converts a time in RFC3339 format to time.Time.
@@ -1415,6 +1481,7 @@ func newObject(o *raw.Object) *ObjectAttrs {
 		Etag:                    o.Etag,
 		CustomTime:              convertTime(o.CustomTime),
 		ComponentCount:          o.ComponentCount,
+		Retention:               toObjectRetention(o.Retention),
 	}
 }
 
@@ -1587,6 +1654,7 @@ var attrToFieldMap = map[string]string{
 	"Etag":                    "etag",
 	"CustomTime":              "customTime",
 	"ComponentCount":          "componentCount",
+	"Retention":               "retention",
 }
 
 // attrToProtoFieldMap maps the field names of ObjectAttrs to the underlying field
@@ -1621,6 +1689,7 @@ var attrToProtoFieldMap = map[string]string{
 	"ComponentCount":          "component_count",
 	// MediaLink was explicitly excluded from the proto as it is an HTTP-ism.
 	// "MediaLink":               "mediaLink",
+	// TODO: add object retention - b/308194853
 }
 
 // SetAttrSelection makes the query populate only specific attributes of
@@ -1806,7 +1875,7 @@ func (c *Conditions) isMetagenerationValid() bool {
 func applyConds(method string, gen int64, conds *Conditions, call interface{}) error {
 	cval := reflect.ValueOf(call)
 	if gen >= 0 {
-		if !setConditionField(cval, "Generation", gen) {
+		if !setGeneration(cval, gen) {
 			return fmt.Errorf("storage: %s: generation not supported", method)
 		}
 	}
@@ -1818,25 +1887,25 @@ func applyConds(method string, gen int64, conds *Conditions, call interface{}) e
 	}
 	switch {
 	case conds.GenerationMatch != 0:
-		if !setConditionField(cval, "IfGenerationMatch", conds.GenerationMatch) {
+		if !setIfGenerationMatch(cval, conds.GenerationMatch) {
 			return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method)
 		}
 	case conds.GenerationNotMatch != 0:
-		if !setConditionField(cval, "IfGenerationNotMatch", conds.GenerationNotMatch) {
+		if !setIfGenerationNotMatch(cval, conds.GenerationNotMatch) {
 			return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method)
 		}
 	case conds.DoesNotExist:
-		if !setConditionField(cval, "IfGenerationMatch", int64(0)) {
+		if !setIfGenerationMatch(cval, int64(0)) {
 			return fmt.Errorf("storage: %s: DoesNotExist not supported", method)
 		}
 	}
 	switch {
 	case conds.MetagenerationMatch != 0:
-		if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) {
+		if !setIfMetagenerationMatch(cval, conds.MetagenerationMatch) {
 			return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
 		}
 	case conds.MetagenerationNotMatch != 0:
-		if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) {
+		if !setIfMetagenerationNotMatch(cval, conds.MetagenerationNotMatch) {
 			return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
 		}
 	}
@@ -1897,16 +1966,45 @@ func applySourceCondsProto(gen int64, conds *Conditions, call *storagepb.Rewrite
 	return nil
 }
 
-// setConditionField sets a field on a *raw.WhateverCall.
+// setGeneration sets Generation on a *raw.WhateverCall.
 // We can't use anonymous interfaces because the return type is
 // different, since the field setters are builders.
-func setConditionField(call reflect.Value, name string, value interface{}) bool {
-	m := call.MethodByName(name)
-	if !m.IsValid() {
-		return false
+// We also make sure to supply a compile-time constant to MethodByName;
+// otherwise, the Go Linker will disable dead code elimination, leading
+// to larger binaries for all packages that import storage.
+func setGeneration(cval reflect.Value, value interface{}) bool {
+	return setCondition(cval.MethodByName("Generation"), value)
+}
+
+// setIfGenerationMatch sets IfGenerationMatch on a *raw.WhateverCall.
+// See also setGeneration.
+func setIfGenerationMatch(cval reflect.Value, value interface{}) bool {
+	return setCondition(cval.MethodByName("IfGenerationMatch"), value)
+}
+
+// setIfGenerationNotMatch sets IfGenerationNotMatch on a *raw.WhateverCall.
+// See also setGeneration.
+func setIfGenerationNotMatch(cval reflect.Value, value interface{}) bool {
+	return setCondition(cval.MethodByName("IfGenerationNotMatch"), value)
+}
+
+// setIfMetagenerationMatch sets IfMetagenerationMatch on a *raw.WhateverCall.
+// See also setGeneration.
+func setIfMetagenerationMatch(cval reflect.Value, value interface{}) bool {
+	return setCondition(cval.MethodByName("IfMetagenerationMatch"), value)
+}
+
+// setIfMetagenerationNotMatch sets IfMetagenerationNotMatch on a *raw.WhateverCall.
+// See also setGeneration.
+func setIfMetagenerationNotMatch(cval reflect.Value, value interface{}) bool {
+	return setCondition(cval.MethodByName("IfMetagenerationNotMatch"), value)
+}
+
+func setCondition(setter reflect.Value, value interface{}) bool {
+	if setter.IsValid() {
+		setter.Call([]reflect.Value{reflect.ValueOf(value)})
 	}
-	m.Call([]reflect.Value{reflect.ValueOf(value)})
-	return true
+	return setter.IsValid()
 }
 
 // Retryer returns an object handle that is configured with custom retry
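
Together, the Retention field on ObjectAttrsToUpdate and the OverrideUnlockedRetention handle option cover the object-retention lifecycle over the HTTP API: setting a policy for the first time or extending its RetainUntil time needs no override, while locking an Unlocked policy, shortening it, or removing it does, and removal is expressed as an explicit empty value rather than nil. A minimal sketch with placeholder names, assuming a bucket that was created with object retention enabled:

package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	obj := client.Bucket("example-bucket").Object("example-object")

	// First-time set (or extending RetainUntil): no override required.
	_, err = obj.Update(ctx, storage.ObjectAttrsToUpdate{
		Retention: &storage.ObjectRetention{
			Mode:        "Unlocked",
			RetainUntil: time.Now().Add(24 * time.Hour),
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Removing an Unlocked policy needs OverrideUnlockedRetention(true) and an
	// explicit empty (non-nil) Retention value; nil would leave it untouched.
	_, err = obj.OverrideUnlockedRetention(true).Update(ctx, storage.ObjectAttrsToUpdate{
		Retention: &storage.ObjectRetention{},
	})
	if err != nil {
		log.Fatal(err)
	}
}
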
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md
index 6e5e80087d..284ea54e3c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md
@@ -1,5 +1,24 @@
 # Release History
 
+## 1.2.1 (2023-12-13)
+
+### Features Added
+
+* Exposed GetSASURL from specialized clients
+
+### Bugs Fixed
+
+* Fixed case in Blob Batch API when blob path has / in it. Fixes [#21649](https://github.com/Azure/azure-sdk-for-go/issues/21649).
+* Fixed SharedKeyMissingError when using client.BlobClient().GetSASURL() method
+* Fixed an issue that would cause metadata keys with empty values to be omitted when enumerating blobs.
+* Fixed an issue where passing empty map to set blob tags API was causing panic. Fixes [#21869](https://github.com/Azure/azure-sdk-for-go/issues/21869).
+* Fixed an issue where downloaded file has incorrect size when not a multiple of block size. Fixes [#21995](https://github.com/Azure/azure-sdk-for-go/issues/21995).
+* Fixed case where `io.ErrUnexpectedEOF` was treated as expected error in `UploadStream`. Fixes [#21837](https://github.com/Azure/azure-sdk-for-go/issues/21837).
+
+### Other Changes
+
+* Updated the version of `azcore` to `1.9.1` and `azidentity` to `1.4.0`.
+
 ## 1.2.0 (2023-10-11)
 
 ### Bugs Fixed
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go
index 69913e334d..2229b7d85e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go
@@ -10,6 +10,7 @@ import (
 	"context"
 	"errors"
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
 	"io"
 	"os"
 	"time"
@@ -338,6 +339,12 @@ func (ab *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.Co
 	return blob.CopyFromURLResponse{}, errors.New("operation will not work on this blob type. CopyFromURL works only with block blob")
 }
 
+// GetSASURL is a convenience method for generating a SAS token for the currently pointed at append blob.
+// It can only be used if the credential supplied during creation was a SharedKeyCredential.
+func (ab *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) {
+	return ab.BlobClient().GetSASURL(permissions, expiry, o)
+}
+
 // Concurrent Download Functions -----------------------------------------------------------------------------------------
 
 // DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
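
The same GetSASURL convenience method is added to the block blob client further down in this patch; both delegate to BlobClient().GetSASURL, so they only work when the client was constructed with a shared key credential. A minimal sketch with placeholder account, key, and blob URL values:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)

func main() {
	// Placeholder credentials; the account key must be base64-encoded.
	cred, err := azblob.NewSharedKeyCredential("exampleaccount", "ZXhhbXBsZS1rZXk=")
	if err != nil {
		log.Fatal(err)
	}

	blobURL := "https://exampleaccount.blob.core.windows.net/example-container/example.log"
	client, err := appendblob.NewClientWithSharedKeyCredential(blobURL, cred, nil)
	if err != nil {
		log.Fatal(err)
	}

	// One-hour, read-only SAS for this append blob, minted without reaching
	// into the underlying blob client.
	sasURL, err := client.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().Add(time.Hour), nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sasURL)
}
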
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json
index ee07ad45b1..80d6183c5b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json
@@ -2,5 +2,5 @@
   "AssetsRepo": "Azure/azure-sdk-assets",
   "AssetsRepoPrefixPath": "go",
   "TagPrefix": "go/storage/azblob",
-  "Tag": "go/storage/azblob_818d8addd0"
+  "Tag": "go/storage/azblob_0040e8284c"
 }
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go
index 55de9b3495..d2421ddd91 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go
@@ -464,7 +464,7 @@ func (b *Client) downloadFile(ctx context.Context, writer io.Writer, o downloadO
 
 	buffers := shared.NewMMBPool(int(o.Concurrency), o.BlockSize)
 	defer buffers.Free()
-	aquireBuffer := func() ([]byte, error) {
+	acquireBuffer := func() ([]byte, error) {
 		select {
 		case b := <-buffers.Acquire():
 			// got a buffer
@@ -489,21 +489,23 @@ func (b *Client) downloadFile(ctx context.Context, writer io.Writer, o downloadO
 	/*
 	 * We have created as many channels as the number of chunks we have.
 	 * Each downloaded block will be sent to the channel matching its
-	 * sequece number, i.e. 0th block is sent to 0th channel, 1st block
+	 * sequence number, i.e. 0th block is sent to 0th channel, 1st block
 	 * to 1st channel and likewise. The blocks are then read and written
 	 * to the file serially by below goroutine. Do note that the blocks
-	 * blocks are still downloaded parallelly from n/w, only serailized
+	 * are still downloaded parallelly from n/w, only serialized
 	 * and written to file here.
 	 */
 	writerError := make(chan error)
+	writeSize := int64(0)
 	go func(ch chan error) {
 		for _, block := range blocks {
 			select {
 			case <-ctx.Done():
 				return
 			case block := <-block:
-				_, err := writer.Write(block)
-				buffers.Release(block)
+				n, err := writer.Write(block)
+				writeSize += int64(n)
+				buffers.Release(block[:cap(block)])
 				if err != nil {
 					ch <- err
 					return
@@ -521,7 +523,7 @@ func (b *Client) downloadFile(ctx context.Context, writer io.Writer, o downloadO
 		NumChunks:     numChunks,
 		Concurrency:   o.Concurrency,
 		Operation: func(ctx context.Context, chunkStart int64, count int64) error {
-			buff, err := aquireBuffer()
+			buff, err := acquireBuffer()
 			if err != nil {
 				return err
 			}
@@ -538,8 +540,8 @@ func (b *Client) downloadFile(ctx context.Context, writer io.Writer, o downloadO
 				return err
 			}
 
-			blockIndex := (chunkStart / o.BlockSize)
-			blocks[blockIndex] <- buff
+			blockIndex := chunkStart / o.BlockSize
+			blocks[blockIndex] <- buff[:count]
 			return nil
 		},
 	})
@@ -551,7 +553,7 @@ func (b *Client) downloadFile(ctx context.Context, writer io.Writer, o downloadO
 	if err = <-writerError; err != nil {
 		return 0, err
 	}
-	return count, nil
+	return writeSize, nil
 }
 
 // DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go
index 5a79c12d43..d733468894 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go
@@ -51,7 +51,7 @@ type Tags = generated.BlobTag
 
 // HTTPRange defines a range of bytes within an HTTP resource, starting at offset and
 // ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange
-// which has an offset but no zero value count indicates from the offset to the resource's end.
+// which has an offset and zero value count indicates from the offset to the resource's end.
 type HTTPRange = exported.HTTPRange
 
 // Request Model Declaration -------------------------------------------------------------------------------------------
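
The corrected wording matches how the SDK interprets the range: a zero Count together with a non-zero Offset means "from Offset to the end of the blob". A small sketch against a placeholder blob URL:

package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	client, err := blob.NewClient("https://exampleaccount.blob.core.windows.net/example-container/example.bin", cred, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Count == 0 reads everything from byte 1024 to the end of the blob.
	resp, err := client.DownloadStream(context.Background(), &blob.DownloadStreamOptions{
		Range: blob.HTTPRange{Offset: 1024, Count: 0},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
}
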
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go
index 8a1573c0ce..07fad60611 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go
@@ -69,6 +69,7 @@ const (
 	CopyIDMismatch                                    Code = "CopyIdMismatch"
 	EmptyMetadataKey                                  Code = "EmptyMetadataKey"
 	FeatureVersionMismatch                            Code = "FeatureVersionMismatch"
+	ImmutabilityPolicyDeleteOnLockedPolicy            Code = "ImmutabilityPolicyDeleteOnLockedPolicy"
 	IncrementalCopyBlobMismatch                       Code = "IncrementalCopyBlobMismatch"
 	IncrementalCopyOfEralierVersionSnapshotNotAllowed Code = "IncrementalCopyOfEralierVersionSnapshotNotAllowed"
 	IncrementalCopySourceMustBeSnapshot               Code = "IncrementalCopySourceMustBeSnapshot"
@@ -122,6 +123,7 @@ const (
 	NoAuthenticationInformation                       Code = "NoAuthenticationInformation"
 	NoPendingCopyOperation                            Code = "NoPendingCopyOperation"
 	OperationNotAllowedOnIncrementalCopyBlob          Code = "OperationNotAllowedOnIncrementalCopyBlob"
+	OperationNotAllowedOnRootBlob                     Code = "OperationNotAllowedOnRootBlob"
 	OperationTimedOut                                 Code = "OperationTimedOut"
 	OutOfRangeInput                                   Code = "OutOfRangeInput"
 	OutOfRangeQueryParameterValue                     Code = "OutOfRangeQueryParameterValue"
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go
index 212255d4c6..24df42c75e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go
@@ -75,7 +75,7 @@ func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWrit
 		}
 
 		var n int
-		n, err = io.ReadFull(src, buffer)
+		n, err = shared.ReadAtLeast(src, buffer, len(buffer))
 
 		if n > 0 {
 			// some data was read, upload it
@@ -108,7 +108,7 @@ func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWrit
 		}
 
 		if err != nil { // The reader is done, no more outgoing buffers
-			if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
+			if errors.Is(err, io.EOF) {
 				// these are expected errors, we don't surface those
 				err = nil
 			} else {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go
index 8b542f85a8..e3167b7747 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go
@@ -13,6 +13,7 @@ import (
 	"errors"
 	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
 	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
 	"io"
 	"math"
 	"os"
@@ -129,7 +130,7 @@ func (bb *Client) URL() string {
 	return bb.generated().Endpoint()
 }
 
-// BlobClient returns the embedded blob client for this AppendBlob client.
+// BlobClient returns the embedded blob client for this BlockBlob client.
 func (bb *Client) BlobClient() *blob.Client {
 	blobClient, _ := base.InnerClients((*base.CompositeClient[generated.BlobClient, generated.BlockBlobClient])(bb))
 	return (*blob.Client)(blobClient)
@@ -410,6 +411,12 @@ func (bb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.Co
 	return bb.BlobClient().CopyFromURL(ctx, copySource, o)
 }
 
+// GetSASURL is a convenience method for generating a SAS token for the currently pointed at block blob.
+// It can only be used if the credential supplied during creation was a SharedKeyCredential.
+func (bb *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) {
+	return bb.BlobClient().GetSASURL(permissions, expiry, o)
+}
+
 // Concurrent Upload Functions -----------------------------------------------------------------------------------------
 
 // uploadFromReader uploads a buffer in blocks to a block blob.
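
A short usage sketch for the GetSASURL method added above (not part of the patch); the shared-key constructor names are the usual ones in the blockblob package, and the account name, key, and blob URL are placeholders:

``` go
package example

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)

// sasURLForBlockBlob builds a block blob client from a shared key credential
// and asks it for a read-only SAS URL valid for one hour. GetSASURL returns an
// error if the client was not constructed with a shared key credential.
func sasURLForBlockBlob(accountName, accountKey, blobURL string) (string, error) {
	cred, err := blockblob.NewSharedKeyCredential(accountName, accountKey)
	if err != nil {
		return "", err
	}
	client, err := blockblob.NewClientWithSharedKeyCredential(blobURL, cred, nil)
	if err != nil {
		return "", err
	}
	return client.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().Add(time.Hour), nil)
}
```
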
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml
index f5100e1311..0303503389 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml
@@ -26,6 +26,7 @@ stages:
     parameters:
       ServiceDirectory: 'storage/azblob'
       RunLiveTests: true
+      UsePipelineProxy: false
       EnvVars:
         AZURE_CLIENT_ID: $(AZBLOB_CLIENT_ID)
         AZURE_TENANT_ID: $(AZBLOB_TENANT_ID)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go
index 560e151d55..48771e8c9c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/common.go
@@ -32,5 +32,5 @@ func ParseURL(u string) (URLParts, error) {
 
 // HTTPRange defines a range of bytes within an HTTP resource, starting at offset and
 // ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange
-// which has an offset but no zero value count indicates from the offset to the resource's end.
+// which has an offset and zero value count indicates from the offset to the resource's end.
 type HTTPRange = exported.HTTPRange
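
A minimal sketch (not part of the patch) of the documented behavior: supplying only Offset and leaving Count at zero downloads from the offset to the end of the blob; the container and blob names are placeholders:

``` go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
)

// tailDownload requests everything from byte 1024 onward. Count == 0 means
// "read to the end of the resource", per the corrected comment above.
func tailDownload(ctx context.Context, client *azblob.Client) error {
	_, err := client.DownloadStream(ctx, "my-container", "my-blob", &azblob.DownloadStreamOptions{
		Range: azblob.HTTPRange{Offset: 1024}, // Count left at zero => read to the end
	})
	return err
}
```
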
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go
index 0bdbaefaf2..c95f19254a 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go
@@ -71,7 +71,10 @@ type CompositeClient[T, U any] struct {
 }
 
 func InnerClients[T, U any](client *CompositeClient[T, U]) (*Client[T], *U) {
-	return &Client[T]{inner: client.innerT}, client.innerU
+	return &Client[T]{
+		inner:      client.innerT,
+		credential: client.sharedKey,
+	}, client.innerU
 }
 
 func NewAppendBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.AppendBlobClient] {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go
index 64a88688a4..02966ee3e9 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/blob_batch.go
@@ -49,7 +49,7 @@ func createBatchID() (string, error) {
 // Content-Length: 0
 func buildSubRequest(req *policy.Request) []byte {
 	var batchSubRequest strings.Builder
-	blobPath := req.Raw().URL.Path
+	blobPath := req.Raw().URL.EscapedPath()
 	if len(req.Raw().URL.RawQuery) > 0 {
 		blobPath += "?" + req.Raw().URL.RawQuery
 	}
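
A small stdlib-only illustration (not part of the patch) of why the sub-request path switches to EscapedPath(): URL.Path is the decoded form, while EscapedPath() preserves the percent-encoding that the batch body must carry; the URL below is a placeholder:

``` go
package example

import (
	"fmt"
	"net/url"
)

// escapedPathDemo contrasts the decoded Path with the escaped form that the
// batch sub-request builder now emits.
func escapedPathDemo() {
	u, _ := url.Parse("https://account.blob.core.windows.net/container/dir%2Fname%20with%20spaces")
	fmt.Println(u.Path)          // /container/dir/name with spaces
	fmt.Println(u.EscapedPath()) // /container/dir%2Fname%20with%20spaces
}
```
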
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go
index 9bc1ca47df..d0355727c9 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/exported.go
@@ -13,7 +13,7 @@ import (
 
 // HTTPRange defines a range of bytes within an HTTP resource, starting at offset and
 // ending at offset+count. A zero-value HTTPRange indicates the entire resource. An HTTPRange
-// which has an offset but no zero value count indicates from the offset to the resource's end.
+// which has an offset and zero value count indicates from the offset to the resource's end.
 type HTTPRange struct {
 	Offset int64
 	Count  int64
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go
index 935debca3d..c8be74c297 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go
@@ -8,5 +8,5 @@ package exported
 
 const (
 	ModuleName    = "azblob"
-	ModuleVersion = "v1.2.0"
+	ModuleVersion = "v1.2.1"
 )
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md
index 367f020f4d..25deeec358 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md
@@ -19,7 +19,7 @@ modelerfour:
   seal-single-value-enum-by-default: true
   lenient-model-deduplication: true
 export-clients: true
-use: "@autorest/go@4.0.0-preview.49"
+use: "@autorest/go@4.0.0-preview.61"
 ```
 
 ### Updating service version to 2023-08-03
@@ -280,7 +280,9 @@ directive:
 
 ``` yaml
 directive:
-- from: zz_models.go
+- from:
+  - zz_models.go
+  - zz_options.go
   where: $
   transform: >-
     return $.
@@ -443,8 +445,8 @@ directive:
   where: $
   transform: >-
     return $.
-      replace(/if\s+!runtime\.HasStatusCode\(resp,\s+http\.StatusOK\)\s+\{\s*\n\t\treturn\s+ServiceClientSubmitBatchResponse\{\}\,\s+runtime\.NewResponseError\(resp\)\s*\n\t\}/g, 
-      `if !runtime.HasStatusCode(resp, http.StatusAccepted) {\n\t\treturn ServiceClientSubmitBatchResponse{}, runtime.NewResponseError(resp)\n\t}`);
+      replace(/if\s+!runtime\.HasStatusCode\(httpResp,\s+http\.StatusOK\)\s+\{\s+err\s+=\s+runtime\.NewResponseError\(httpResp\)\s+return ServiceClientSubmitBatchResponse\{\}\,\s+err\s+}/g, 
+      `if !runtime.HasStatusCode(httpResp, http.StatusAccepted) {\n\t\terr = runtime.NewResponseError(httpResp)\n\t\treturn ServiceClientSubmitBatchResponse{}, err\n\t}`);
 ```
 
 ### Convert time to GMT for If-Modified-Since and If-Unmodified-Since request headers
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go
index 32be22221d..dbfe069e6f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go
@@ -3,9 +3,8 @@
 
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License. See License.txt in the project root for license information.
-// Code generated by Microsoft (R) AutoRest Code Generator.
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
 // Changes may cause incorrect behavior and will be lost if the code is regenerated.
-// DO NOT EDIT.
 
 package generated
 
@@ -44,18 +43,21 @@ type AppendBlobClient struct {
 //   - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *AppendBlobClient) AppendBlock(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *AppendBlobClientAppendBlockOptions, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientAppendBlockResponse, error) {
+	var err error
 	req, err := client.appendBlockCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, appendPositionAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
 	if err != nil {
 		return AppendBlobClientAppendBlockResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return AppendBlobClientAppendBlockResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return AppendBlobClientAppendBlockResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
+		err = runtime.NewResponseError(httpResp)
+		return AppendBlobClientAppendBlockResponse{}, err
 	}
-	return client.appendBlockHandleResponse(resp)
+	resp, err := client.appendBlockHandleResponse(httpResp)
+	return resp, err
 }
 
 // appendBlockCreateRequest creates the AppendBlock request.
@@ -127,46 +129,6 @@ func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, co
 // appendBlockHandleResponse handles the AppendBlock response.
 func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockResponse, error) {
 	result := AppendBlobClientAppendBlockResponse{}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
-	}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return AppendBlobClientAppendBlockResponse{}, err
-		}
-		result.LastModified = &lastModified
-	}
-	if val := resp.Header.Get("Content-MD5"); val != "" {
-		contentMD5, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return AppendBlobClientAppendBlockResponse{}, err
-		}
-		result.ContentMD5 = contentMD5
-	}
-	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
-		contentCRC64, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return AppendBlobClientAppendBlockResponse{}, err
-		}
-		result.ContentCRC64 = contentCRC64
-	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return AppendBlobClientAppendBlockResponse{}, err
-		}
-		result.Date = &date
-	}
 	if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" {
 		result.BlobAppendOffset = &val
 	}
@@ -178,6 +140,39 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) (
 		}
 		result.BlobCommittedBlockCount = &blobCommittedBlockCount
 	}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
+		contentCRC64, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return AppendBlobClientAppendBlockResponse{}, err
+		}
+		result.ContentCRC64 = contentCRC64
+	}
+	if val := resp.Header.Get("Content-MD5"); val != "" {
+		contentMD5, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return AppendBlobClientAppendBlockResponse{}, err
+		}
+		result.ContentMD5 = contentMD5
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return AppendBlobClientAppendBlockResponse{}, err
+		}
+		result.Date = &date
+	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+		result.EncryptionKeySHA256 = &val
+	}
+	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+		result.EncryptionScope = &val
+	}
 	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
 		isServerEncrypted, err := strconv.ParseBool(val)
 		if err != nil {
@@ -185,11 +180,18 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) (
 		}
 		result.IsServerEncrypted = &isServerEncrypted
 	}
-	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
-		result.EncryptionKeySHA256 = &val
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return AppendBlobClientAppendBlockResponse{}, err
+		}
+		result.LastModified = &lastModified
 	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
 	}
 	return result, nil
 }
@@ -213,18 +215,21 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) (
 //   - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL
 //     method.
 func (client *AppendBlobClient) AppendBlockFromURL(ctx context.Context, sourceURL string, contentLength int64, options *AppendBlobClientAppendBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (AppendBlobClientAppendBlockFromURLResponse, error) {
+	var err error
 	req, err := client.appendBlockFromURLCreateRequest(ctx, sourceURL, contentLength, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, appendPositionAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions)
 	if err != nil {
 		return AppendBlobClientAppendBlockFromURLResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return AppendBlobClientAppendBlockFromURLResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return AppendBlobClientAppendBlockFromURLResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
+		err = runtime.NewResponseError(httpResp)
+		return AppendBlobClientAppendBlockFromURLResponse{}, err
 	}
-	return client.appendBlockFromURLHandleResponse(resp)
+	resp, err := client.appendBlockFromURLHandleResponse(httpResp)
+	return resp, err
 }
 
 // appendBlockFromURLCreateRequest creates the AppendBlockFromURL request.
@@ -315,43 +320,6 @@ func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont
 // appendBlockFromURLHandleResponse handles the AppendBlockFromURL response.
 func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Response) (AppendBlobClientAppendBlockFromURLResponse, error) {
 	result := AppendBlobClientAppendBlockFromURLResponse{}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
-	}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return AppendBlobClientAppendBlockFromURLResponse{}, err
-		}
-		result.LastModified = &lastModified
-	}
-	if val := resp.Header.Get("Content-MD5"); val != "" {
-		contentMD5, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return AppendBlobClientAppendBlockFromURLResponse{}, err
-		}
-		result.ContentMD5 = contentMD5
-	}
-	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
-		contentCRC64, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return AppendBlobClientAppendBlockFromURLResponse{}, err
-		}
-		result.ContentCRC64 = contentCRC64
-	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return AppendBlobClientAppendBlockFromURLResponse{}, err
-		}
-		result.Date = &date
-	}
 	if val := resp.Header.Get("x-ms-blob-append-offset"); val != "" {
 		result.BlobAppendOffset = &val
 	}
@@ -363,6 +331,30 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp
 		}
 		result.BlobCommittedBlockCount = &blobCommittedBlockCount
 	}
+	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
+		contentCRC64, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return AppendBlobClientAppendBlockFromURLResponse{}, err
+		}
+		result.ContentCRC64 = contentCRC64
+	}
+	if val := resp.Header.Get("Content-MD5"); val != "" {
+		contentMD5, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return AppendBlobClientAppendBlockFromURLResponse{}, err
+		}
+		result.ContentMD5 = contentMD5
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return AppendBlobClientAppendBlockFromURLResponse{}, err
+		}
+		result.Date = &date
+	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
 	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
 		result.EncryptionKeySHA256 = &val
 	}
@@ -376,6 +368,19 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp
 		}
 		result.IsServerEncrypted = &isServerEncrypted
 	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return AppendBlobClientAppendBlockFromURLResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	return result, nil
 }
 
@@ -391,18 +396,21 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp
 //   - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *AppendBlobClient) Create(ctx context.Context, contentLength int64, options *AppendBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (AppendBlobClientCreateResponse, error) {
+	var err error
 	req, err := client.createCreateRequest(ctx, contentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
 	if err != nil {
 		return AppendBlobClientCreateResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return AppendBlobClientCreateResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return AppendBlobClientCreateResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
+		err = runtime.NewResponseError(httpResp)
+		return AppendBlobClientCreateResponse{}, err
 	}
-	return client.createHandleResponse(resp)
+	resp, err := client.createHandleResponse(httpResp)
+	return resp, err
 }
 
 // createCreateRequest creates the Create request.
@@ -496,15 +504,8 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content
 // createHandleResponse handles the Create response.
 func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (AppendBlobClientCreateResponse, error) {
 	result := AppendBlobClientCreateResponse{}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
-	}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return AppendBlobClientCreateResponse{}, err
-		}
-		result.LastModified = &lastModified
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
 	}
 	if val := resp.Header.Get("Content-MD5"); val != "" {
 		contentMD5, err := base64.StdEncoding.DecodeString(val)
@@ -513,8 +514,35 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen
 		}
 		result.ContentMD5 = contentMD5
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return AppendBlobClientCreateResponse{}, err
+		}
+		result.Date = &date
+	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+		result.EncryptionKeySHA256 = &val
+	}
+	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+		result.EncryptionScope = &val
+	}
+	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
+		isServerEncrypted, err := strconv.ParseBool(val)
+		if err != nil {
+			return AppendBlobClientCreateResponse{}, err
+		}
+		result.IsServerEncrypted = &isServerEncrypted
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return AppendBlobClientCreateResponse{}, err
+		}
+		result.LastModified = &lastModified
 	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
@@ -525,26 +553,6 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen
 	if val := resp.Header.Get("x-ms-version-id"); val != "" {
 		result.VersionID = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return AppendBlobClientCreateResponse{}, err
-		}
-		result.Date = &date
-	}
-	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
-		isServerEncrypted, err := strconv.ParseBool(val)
-		if err != nil {
-			return AppendBlobClientCreateResponse{}, err
-		}
-		result.IsServerEncrypted = &isServerEncrypted
-	}
-	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
-		result.EncryptionKeySHA256 = &val
-	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
-	}
 	return result, nil
 }
 
@@ -559,18 +567,21 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen
 //   - AppendPositionAccessConditions - AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock
 //     method.
 func (client *AppendBlobClient) Seal(ctx context.Context, options *AppendBlobClientSealOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, appendPositionAccessConditions *AppendPositionAccessConditions) (AppendBlobClientSealResponse, error) {
+	var err error
 	req, err := client.sealCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions, appendPositionAccessConditions)
 	if err != nil {
 		return AppendBlobClientSealResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return AppendBlobClientSealResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return AppendBlobClientSealResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return AppendBlobClientSealResponse{}, err
 	}
-	return client.sealHandleResponse(resp)
+	resp, err := client.sealHandleResponse(httpResp)
+	return resp, err
 }
 
 // sealCreateRequest creates the Seal request.
@@ -614,25 +625,9 @@ func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options *
 // sealHandleResponse handles the Seal response.
 func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendBlobClientSealResponse, error) {
 	result := AppendBlobClientSealResponse{}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
-	}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return AppendBlobClientSealResponse{}, err
-		}
-		result.LastModified = &lastModified
-	}
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -640,6 +635,9 @@ func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendB
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
 	if val := resp.Header.Get("x-ms-blob-sealed"); val != "" {
 		isSealed, err := strconv.ParseBool(val)
 		if err != nil {
@@ -647,5 +645,18 @@ func (client *AppendBlobClient) sealHandleResponse(resp *http.Response) (AppendB
 		}
 		result.IsSealed = &isSealed
 	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return AppendBlobClientSealResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	return result, nil
 }
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go
index 257a3656dd..caaa3dfed7 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go
@@ -3,9 +3,8 @@
 
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License. See License.txt in the project root for license information.
-// Code generated by Microsoft (R) AutoRest Code Generator.
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
 // Changes may cause incorrect behavior and will be lost if the code is regenerated.
-// DO NOT EDIT.
 
 package generated
 
@@ -38,18 +37,21 @@ type BlobClient struct {
 //   - options - BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method.
 //   - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
 func (client *BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, options *BlobClientAbortCopyFromURLOptions, leaseAccessConditions *LeaseAccessConditions) (BlobClientAbortCopyFromURLResponse, error) {
+	var err error
 	req, err := client.abortCopyFromURLCreateRequest(ctx, copyID, options, leaseAccessConditions)
 	if err != nil {
 		return BlobClientAbortCopyFromURLResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientAbortCopyFromURLResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusNoContent) {
-		return BlobClientAbortCopyFromURLResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusNoContent) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientAbortCopyFromURLResponse{}, err
 	}
-	return client.abortCopyFromURLHandleResponse(resp)
+	resp, err := client.abortCopyFromURLHandleResponse(httpResp)
+	return resp, err
 }
 
 // abortCopyFromURLCreateRequest creates the AbortCopyFromURL request.
@@ -83,12 +85,6 @@ func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (B
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -96,6 +92,12 @@ func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (B
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	return result, nil
 }
 
@@ -109,18 +111,21 @@ func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (B
 //   - options - BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *BlobClient) AcquireLease(ctx context.Context, duration int32, options *BlobClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientAcquireLeaseResponse, error) {
+	var err error
 	req, err := client.acquireLeaseCreateRequest(ctx, duration, options, modifiedAccessConditions)
 	if err != nil {
 		return BlobClientAcquireLeaseResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientAcquireLeaseResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return BlobClientAcquireLeaseResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientAcquireLeaseResponse{}, err
 	}
-	return client.acquireLeaseHandleResponse(resp)
+	resp, err := client.acquireLeaseHandleResponse(httpResp)
+	return resp, err
 }
 
 // acquireLeaseCreateRequest creates the AcquireLease request.
@@ -166,6 +171,16 @@ func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, duratio
 // acquireLeaseHandleResponse handles the AcquireLease response.
 func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobClientAcquireLeaseResponse, error) {
 	result := BlobClientAcquireLeaseResponse{}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientAcquireLeaseResponse{}, err
+		}
+		result.Date = &date
+	}
 	if val := resp.Header.Get("ETag"); val != "" {
 		result.ETag = (*azcore.ETag)(&val)
 	}
@@ -179,22 +194,12 @@ func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobC
 	if val := resp.Header.Get("x-ms-lease-id"); val != "" {
 		result.LeaseID = &val
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
 	}
 	if val := resp.Header.Get("x-ms-version"); val != "" {
 		result.Version = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlobClientAcquireLeaseResponse{}, err
-		}
-		result.Date = &date
-	}
 	return result, nil
 }
 
@@ -205,18 +210,21 @@ func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobC
 //   - options - BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *BlobClient) BreakLease(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientBreakLeaseResponse, error) {
+	var err error
 	req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions)
 	if err != nil {
 		return BlobClientBreakLeaseResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientBreakLeaseResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusAccepted) {
-		return BlobClientBreakLeaseResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusAccepted) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientBreakLeaseResponse{}, err
 	}
-	return client.breakLeaseHandleResponse(resp)
+	resp, err := client.breakLeaseHandleResponse(httpResp)
+	return resp, err
 }
 
 // breakLeaseCreateRequest creates the BreakLease request.
@@ -261,6 +269,16 @@ func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options *
 // breakLeaseHandleResponse handles the BreakLease response.
 func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobClientBreakLeaseResponse, error) {
 	result := BlobClientBreakLeaseResponse{}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientBreakLeaseResponse{}, err
+		}
+		result.Date = &date
+	}
 	if val := resp.Header.Get("ETag"); val != "" {
 		result.ETag = (*azcore.ETag)(&val)
 	}
@@ -279,22 +297,12 @@ func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobCli
 		}
 		result.LeaseTime = &leaseTime
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
 	}
 	if val := resp.Header.Get("x-ms-version"); val != "" {
 		result.Version = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlobClientBreakLeaseResponse{}, err
-		}
-		result.Date = &date
-	}
 	return result, nil
 }
 
@@ -309,18 +317,21 @@ func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobCli
 //   - options - BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *BlobClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *BlobClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientChangeLeaseResponse, error) {
+	var err error
 	req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions)
 	if err != nil {
 		return BlobClientChangeLeaseResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientChangeLeaseResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return BlobClientChangeLeaseResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientChangeLeaseResponse{}, err
 	}
-	return client.changeLeaseHandleResponse(resp)
+	resp, err := client.changeLeaseHandleResponse(httpResp)
+	return resp, err
 }
 
 // changeLeaseCreateRequest creates the ChangeLease request.
@@ -364,6 +375,16 @@ func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID
 // changeLeaseHandleResponse handles the ChangeLease response.
 func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobClientChangeLeaseResponse, error) {
 	result := BlobClientChangeLeaseResponse{}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientChangeLeaseResponse{}, err
+		}
+		result.Date = &date
+	}
 	if val := resp.Header.Get("ETag"); val != "" {
 		result.ETag = (*azcore.ETag)(&val)
 	}
@@ -374,25 +395,15 @@ func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobCl
 		}
 		result.LastModified = &lastModified
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
+	if val := resp.Header.Get("x-ms-lease-id"); val != "" {
+		result.LeaseID = &val
 	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-lease-id"); val != "" {
-		result.LeaseID = &val
-	}
 	if val := resp.Header.Get("x-ms-version"); val != "" {
 		result.Version = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlobClientChangeLeaseResponse{}, err
-		}
-		result.Date = &date
-	}
 	return result, nil
 }
 
@@ -411,18 +422,21 @@ func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobCl
 //   - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
 //   - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
 func (client *BlobClient) CopyFromURL(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions, cpkScopeInfo *CPKScopeInfo) (BlobClientCopyFromURLResponse, error) {
+	var err error
 	req, err := client.copyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions, cpkScopeInfo)
 	if err != nil {
 		return BlobClientCopyFromURLResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientCopyFromURLResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusAccepted) {
-		return BlobClientCopyFromURLResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusAccepted) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientCopyFromURLResponse{}, err
 	}
-	return client.copyFromURLHandleResponse(resp)
+	resp, err := client.copyFromURLHandleResponse(httpResp)
+	return resp, err
 }
 
 // copyFromURLCreateRequest creates the CopyFromURL request.
@@ -513,9 +527,42 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour
 // copyFromURLHandleResponse handles the CopyFromURL response.
 func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobClientCopyFromURLResponse, error) {
 	result := BlobClientCopyFromURLResponse{}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
+		contentCRC64, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return BlobClientCopyFromURLResponse{}, err
+		}
+		result.ContentCRC64 = contentCRC64
+	}
+	if val := resp.Header.Get("Content-MD5"); val != "" {
+		contentMD5, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return BlobClientCopyFromURLResponse{}, err
+		}
+		result.ContentMD5 = contentMD5
+	}
+	if val := resp.Header.Get("x-ms-copy-id"); val != "" {
+		result.CopyID = &val
+	}
+	if val := resp.Header.Get("x-ms-copy-status"); val != "" {
+		result.CopyStatus = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientCopyFromURLResponse{}, err
+		}
+		result.Date = &date
+	}
 	if val := resp.Header.Get("ETag"); val != "" {
 		result.ETag = (*azcore.ETag)(&val)
 	}
+	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+		result.EncryptionScope = &val
+	}
 	if val := resp.Header.Get("Last-Modified"); val != "" {
 		lastModified, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -523,9 +570,6 @@ func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobCl
 		}
 		result.LastModified = &lastModified
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
 	}
@@ -535,36 +579,6 @@ func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobCl
 	if val := resp.Header.Get("x-ms-version-id"); val != "" {
 		result.VersionID = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlobClientCopyFromURLResponse{}, err
-		}
-		result.Date = &date
-	}
-	if val := resp.Header.Get("x-ms-copy-id"); val != "" {
-		result.CopyID = &val
-	}
-	if val := resp.Header.Get("x-ms-copy-status"); val != "" {
-		result.CopyStatus = &val
-	}
-	if val := resp.Header.Get("Content-MD5"); val != "" {
-		contentMD5, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return BlobClientCopyFromURLResponse{}, err
-		}
-		result.ContentMD5 = contentMD5
-	}
-	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
-		contentCRC64, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return BlobClientCopyFromURLResponse{}, err
-		}
-		result.ContentCRC64 = contentCRC64
-	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
-	}
 	return result, nil
 }
 
@@ -578,18 +592,21 @@ func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobCl
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 //   - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
 func (client *BlobClient) CreateSnapshot(ctx context.Context, options *BlobClientCreateSnapshotOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCreateSnapshotResponse, error) {
+	var err error
 	req, err := client.createSnapshotCreateRequest(ctx, options, cpkInfo, cpkScopeInfo, modifiedAccessConditions, leaseAccessConditions)
 	if err != nil {
 		return BlobClientCreateSnapshotResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientCreateSnapshotResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return BlobClientCreateSnapshotResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientCreateSnapshotResponse{}, err
 	}
-	return client.createSnapshotHandleResponse(resp)
+	resp, err := client.createSnapshotHandleResponse(httpResp)
+	return resp, err
 }
 
 // createSnapshotCreateRequest creates the CreateSnapshot request.
@@ -652,31 +669,9 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio
 // createSnapshotHandleResponse handles the CreateSnapshot response.
 func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (BlobClientCreateSnapshotResponse, error) {
 	result := BlobClientCreateSnapshotResponse{}
-	if val := resp.Header.Get("x-ms-snapshot"); val != "" {
-		result.Snapshot = &val
-	}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
-	}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlobClientCreateSnapshotResponse{}, err
-		}
-		result.LastModified = &lastModified
-	}
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
-	if val := resp.Header.Get("x-ms-version-id"); val != "" {
-		result.VersionID = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -684,6 +679,9 @@ func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (Blo
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
 	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
 		isServerEncrypted, err := strconv.ParseBool(val)
 		if err != nil {
@@ -691,6 +689,25 @@ func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (Blo
 		}
 		result.IsServerEncrypted = &isServerEncrypted
 	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientCreateSnapshotResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-snapshot"); val != "" {
+		result.Snapshot = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
+	if val := resp.Header.Get("x-ms-version-id"); val != "" {
+		result.VersionID = &val
+	}
 	return result, nil
 }
 
@@ -712,18 +729,21 @@ func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (Blo
 //   - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *BlobClient) Delete(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDeleteResponse, error) {
+	var err error
 	req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions)
 	if err != nil {
 		return BlobClientDeleteResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientDeleteResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusAccepted) {
-		return BlobClientDeleteResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusAccepted) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientDeleteResponse{}, err
 	}
-	return client.deleteHandleResponse(resp)
+	resp, err := client.deleteHandleResponse(httpResp)
+	return resp, err
 }
 
 // deleteCreateRequest creates the Delete request.
@@ -781,12 +801,6 @@ func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientD
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -794,6 +808,12 @@ func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientD
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	return result, nil
 }
 
@@ -804,18 +824,21 @@ func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientD
 //   - options - BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy
 //     method.
 func (client *BlobClient) DeleteImmutabilityPolicy(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (BlobClientDeleteImmutabilityPolicyResponse, error) {
+	var err error
 	req, err := client.deleteImmutabilityPolicyCreateRequest(ctx, options)
 	if err != nil {
 		return BlobClientDeleteImmutabilityPolicyResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientDeleteImmutabilityPolicyResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return BlobClientDeleteImmutabilityPolicyResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientDeleteImmutabilityPolicyResponse{}, err
 	}
-	return client.deleteImmutabilityPolicyHandleResponse(resp)
+	resp, err := client.deleteImmutabilityPolicyHandleResponse(httpResp)
+	return resp, err
 }
 
 // deleteImmutabilityPolicyCreateRequest creates the DeleteImmutabilityPolicy request.
@@ -844,12 +867,6 @@ func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -857,6 +874,12 @@ func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	return result, nil
 }
 
@@ -870,18 +893,21 @@ func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp
 //   - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *BlobClient) Download(ctx context.Context, options *BlobClientDownloadOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientDownloadResponse, error) {
+	var err error
 	req, err := client.downloadCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions)
 	if err != nil {
 		return BlobClientDownloadResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientDownloadResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent, http.StatusNotModified) {
-		return BlobClientDownloadResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusPartialContent, http.StatusNotModified) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientDownloadResponse{}, err
 	}
-	return client.downloadHandleResponse(resp)
+	resp, err := client.downloadHandleResponse(httpResp)
+	return resp, err
 }
 
 // downloadCreateRequest creates the Download request.
@@ -949,12 +975,97 @@ func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *Bl
 // downloadHandleResponse handles the Download response.
 func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClientDownloadResponse, error) {
 	result := BlobClientDownloadResponse{Body: resp.Body}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
+	if val := resp.Header.Get("Accept-Ranges"); val != "" {
+		result.AcceptRanges = &val
+	}
+	if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" {
+		blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32)
+		blobCommittedBlockCount := int32(blobCommittedBlockCount32)
 		if err != nil {
 			return BlobClientDownloadResponse{}, err
 		}
-		result.LastModified = &lastModified
+		result.BlobCommittedBlockCount = &blobCommittedBlockCount
+	}
+	if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" {
+		blobContentMD5, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return BlobClientDownloadResponse{}, err
+		}
+		result.BlobContentMD5 = blobContentMD5
+	}
+	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
+		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
+		if err != nil {
+			return BlobClientDownloadResponse{}, err
+		}
+		result.BlobSequenceNumber = &blobSequenceNumber
+	}
+	if val := resp.Header.Get("x-ms-blob-type"); val != "" {
+		result.BlobType = (*BlobType)(&val)
+	}
+	if val := resp.Header.Get("Cache-Control"); val != "" {
+		result.CacheControl = &val
+	}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
+		contentCRC64, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return BlobClientDownloadResponse{}, err
+		}
+		result.ContentCRC64 = contentCRC64
+	}
+	if val := resp.Header.Get("Content-Disposition"); val != "" {
+		result.ContentDisposition = &val
+	}
+	if val := resp.Header.Get("Content-Encoding"); val != "" {
+		result.ContentEncoding = &val
+	}
+	if val := resp.Header.Get("Content-Language"); val != "" {
+		result.ContentLanguage = &val
+	}
+	if val := resp.Header.Get("Content-Length"); val != "" {
+		contentLength, err := strconv.ParseInt(val, 10, 64)
+		if err != nil {
+			return BlobClientDownloadResponse{}, err
+		}
+		result.ContentLength = &contentLength
+	}
+	if val := resp.Header.Get("Content-MD5"); val != "" {
+		contentMD5, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return BlobClientDownloadResponse{}, err
+		}
+		result.ContentMD5 = contentMD5
+	}
+	if val := resp.Header.Get("Content-Range"); val != "" {
+		result.ContentRange = &val
+	}
+	if val := resp.Header.Get("Content-Type"); val != "" {
+		result.ContentType = &val
+	}
+	if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" {
+		copyCompletionTime, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientDownloadResponse{}, err
+		}
+		result.CopyCompletionTime = &copyCompletionTime
+	}
+	if val := resp.Header.Get("x-ms-copy-id"); val != "" {
+		result.CopyID = &val
+	}
+	if val := resp.Header.Get("x-ms-copy-progress"); val != "" {
+		result.CopyProgress = &val
+	}
+	if val := resp.Header.Get("x-ms-copy-source"); val != "" {
+		result.CopySource = &val
+	}
+	if val := resp.Header.Get("x-ms-copy-status"); val != "" {
+		result.CopyStatus = (*CopyStatusType)(&val)
+	}
+	if val := resp.Header.Get("x-ms-copy-status-description"); val != "" {
+		result.CopyStatusDescription = &val
 	}
 	if val := resp.Header.Get("x-ms-creation-time"); val != "" {
 		creationTime, err := time.Parse(time.RFC1123, val)
@@ -963,6 +1074,86 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien
 		}
 		result.CreationTime = &creationTime
 	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientDownloadResponse{}, err
+		}
+		result.Date = &date
+	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+		result.EncryptionKeySHA256 = &val
+	}
+	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+		result.EncryptionScope = &val
+	}
+	if val := resp.Header.Get("x-ms-error-code"); val != "" {
+		result.ErrorCode = &val
+	}
+	if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" {
+		immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientDownloadResponse{}, err
+		}
+		result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn
+	}
+	if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" {
+		result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val)
+	}
+	if val := resp.Header.Get("x-ms-is-current-version"); val != "" {
+		isCurrentVersion, err := strconv.ParseBool(val)
+		if err != nil {
+			return BlobClientDownloadResponse{}, err
+		}
+		result.IsCurrentVersion = &isCurrentVersion
+	}
+	if val := resp.Header.Get("x-ms-blob-sealed"); val != "" {
+		isSealed, err := strconv.ParseBool(val)
+		if err != nil {
+			return BlobClientDownloadResponse{}, err
+		}
+		result.IsSealed = &isSealed
+	}
+	if val := resp.Header.Get("x-ms-server-encrypted"); val != "" {
+		isServerEncrypted, err := strconv.ParseBool(val)
+		if err != nil {
+			return BlobClientDownloadResponse{}, err
+		}
+		result.IsServerEncrypted = &isServerEncrypted
+	}
+	if val := resp.Header.Get("x-ms-last-access-time"); val != "" {
+		lastAccessed, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientDownloadResponse{}, err
+		}
+		result.LastAccessed = &lastAccessed
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientDownloadResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-lease-duration"); val != "" {
+		result.LeaseDuration = (*LeaseDurationType)(&val)
+	}
+	if val := resp.Header.Get("x-ms-lease-state"); val != "" {
+		result.LeaseState = (*LeaseStateType)(&val)
+	}
+	if val := resp.Header.Get("x-ms-lease-status"); val != "" {
+		result.LeaseStatus = (*LeaseStatusType)(&val)
+	}
+	if val := resp.Header.Get("x-ms-legal-hold"); val != "" {
+		legalHold, err := strconv.ParseBool(val)
+		if err != nil {
+			return BlobClientDownloadResponse{}, err
+		}
+		result.LegalHold = &legalHold
+	}
 	for hh := range resp.Header {
 		if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") {
 			if result.Metadata == nil {
@@ -982,139 +1173,9 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien
 			result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh))
 		}
 	}
-	if val := resp.Header.Get("Content-Length"); val != "" {
-		contentLength, err := strconv.ParseInt(val, 10, 64)
-		if err != nil {
-			return BlobClientDownloadResponse{}, err
-		}
-		result.ContentLength = &contentLength
-	}
-	if val := resp.Header.Get("Content-Type"); val != "" {
-		result.ContentType = &val
-	}
-	if val := resp.Header.Get("Content-Range"); val != "" {
-		result.ContentRange = &val
-	}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
-	}
-	if val := resp.Header.Get("Content-MD5"); val != "" {
-		contentMD5, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return BlobClientDownloadResponse{}, err
-		}
-		result.ContentMD5 = contentMD5
-	}
-	if val := resp.Header.Get("Content-Encoding"); val != "" {
-		result.ContentEncoding = &val
-	}
-	if val := resp.Header.Get("Cache-Control"); val != "" {
-		result.CacheControl = &val
-	}
-	if val := resp.Header.Get("Content-Disposition"); val != "" {
-		result.ContentDisposition = &val
-	}
-	if val := resp.Header.Get("Content-Language"); val != "" {
-		result.ContentLanguage = &val
-	}
-	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
-		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
-		if err != nil {
-			return BlobClientDownloadResponse{}, err
-		}
-		result.BlobSequenceNumber = &blobSequenceNumber
-	}
-	if val := resp.Header.Get("x-ms-blob-type"); val != "" {
-		result.BlobType = (*BlobType)(&val)
-	}
-	if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" {
-		copyCompletionTime, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlobClientDownloadResponse{}, err
-		}
-		result.CopyCompletionTime = &copyCompletionTime
-	}
-	if val := resp.Header.Get("x-ms-copy-status-description"); val != "" {
-		result.CopyStatusDescription = &val
-	}
-	if val := resp.Header.Get("x-ms-copy-id"); val != "" {
-		result.CopyID = &val
-	}
-	if val := resp.Header.Get("x-ms-copy-progress"); val != "" {
-		result.CopyProgress = &val
-	}
-	if val := resp.Header.Get("x-ms-copy-source"); val != "" {
-		result.CopySource = &val
-	}
-	if val := resp.Header.Get("x-ms-copy-status"); val != "" {
-		result.CopyStatus = (*CopyStatusType)(&val)
-	}
-	if val := resp.Header.Get("x-ms-lease-duration"); val != "" {
-		result.LeaseDuration = (*LeaseDurationType)(&val)
-	}
-	if val := resp.Header.Get("x-ms-lease-state"); val != "" {
-		result.LeaseState = (*LeaseStateType)(&val)
-	}
-	if val := resp.Header.Get("x-ms-lease-status"); val != "" {
-		result.LeaseStatus = (*LeaseStatusType)(&val)
-	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
-	if val := resp.Header.Get("x-ms-version-id"); val != "" {
-		result.VersionID = &val
-	}
-	if val := resp.Header.Get("x-ms-is-current-version"); val != "" {
-		isCurrentVersion, err := strconv.ParseBool(val)
-		if err != nil {
-			return BlobClientDownloadResponse{}, err
-		}
-		result.IsCurrentVersion = &isCurrentVersion
-	}
-	if val := resp.Header.Get("Accept-Ranges"); val != "" {
-		result.AcceptRanges = &val
-	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlobClientDownloadResponse{}, err
-		}
-		result.Date = &date
-	}
-	if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" {
-		blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32)
-		blobCommittedBlockCount := int32(blobCommittedBlockCount32)
-		if err != nil {
-			return BlobClientDownloadResponse{}, err
-		}
-		result.BlobCommittedBlockCount = &blobCommittedBlockCount
-	}
-	if val := resp.Header.Get("x-ms-server-encrypted"); val != "" {
-		isServerEncrypted, err := strconv.ParseBool(val)
-		if err != nil {
-			return BlobClientDownloadResponse{}, err
-		}
-		result.IsServerEncrypted = &isServerEncrypted
-	}
-	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
-		result.EncryptionKeySHA256 = &val
-	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
-	}
-	if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" {
-		blobContentMD5, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return BlobClientDownloadResponse{}, err
-		}
-		result.BlobContentMD5 = blobContentMD5
-	}
 	if val := resp.Header.Get("x-ms-tag-count"); val != "" {
 		tagCount, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
@@ -1122,46 +1183,11 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien
 		}
 		result.TagCount = &tagCount
 	}
-	if val := resp.Header.Get("x-ms-blob-sealed"); val != "" {
-		isSealed, err := strconv.ParseBool(val)
-		if err != nil {
-			return BlobClientDownloadResponse{}, err
-		}
-		result.IsSealed = &isSealed
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
 	}
-	if val := resp.Header.Get("x-ms-last-access-time"); val != "" {
-		lastAccessed, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlobClientDownloadResponse{}, err
-		}
-		result.LastAccessed = &lastAccessed
-	}
-	if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" {
-		immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlobClientDownloadResponse{}, err
-		}
-		result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn
-	}
-	if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" {
-		result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val)
-	}
-	if val := resp.Header.Get("x-ms-legal-hold"); val != "" {
-		legalHold, err := strconv.ParseBool(val)
-		if err != nil {
-			return BlobClientDownloadResponse{}, err
-		}
-		result.LegalHold = &legalHold
-	}
-	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
-		contentCRC64, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return BlobClientDownloadResponse{}, err
-		}
-		result.ContentCRC64 = contentCRC64
-	}
-	if val := resp.Header.Get("x-ms-error-code"); val != "" {
-		result.ErrorCode = &val
+	if val := resp.Header.Get("x-ms-version-id"); val != "" {
+		result.VersionID = &val
 	}
 	return result, nil
 }
@@ -1172,18 +1198,21 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien
 // Generated from API version 2023-08-03
 //   - options - BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method.
 func (client *BlobClient) GetAccountInfo(ctx context.Context, options *BlobClientGetAccountInfoOptions) (BlobClientGetAccountInfoResponse, error) {
+	var err error
 	req, err := client.getAccountInfoCreateRequest(ctx, options)
 	if err != nil {
 		return BlobClientGetAccountInfoResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientGetAccountInfoResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return BlobClientGetAccountInfoResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientGetAccountInfoResponse{}, err
 	}
-	return client.getAccountInfoHandleResponse(resp)
+	resp, err := client.getAccountInfoHandleResponse(httpResp)
+	return resp, err
 }
 
 // getAccountInfoCreateRequest creates the GetAccountInfo request.
@@ -1204,15 +1233,12 @@ func (client *BlobClient) getAccountInfoCreateRequest(ctx context.Context, optio
 // getAccountInfoHandleResponse handles the GetAccountInfo response.
 func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (BlobClientGetAccountInfoResponse, error) {
 	result := BlobClientGetAccountInfoResponse{}
+	if val := resp.Header.Get("x-ms-account-kind"); val != "" {
+		result.AccountKind = (*AccountKind)(&val)
+	}
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -1220,11 +1246,14 @@ func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (Blo
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
 	if val := resp.Header.Get("x-ms-sku-name"); val != "" {
 		result.SKUName = (*SKUName)(&val)
 	}
-	if val := resp.Header.Get("x-ms-account-kind"); val != "" {
-		result.AccountKind = (*AccountKind)(&val)
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
 	}
 	return result, nil
 }
@@ -1239,18 +1268,21 @@ func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (Blo
 //   - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *BlobClient) GetProperties(ctx context.Context, options *BlobClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientGetPropertiesResponse, error) {
+	var err error
 	req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions)
 	if err != nil {
 		return BlobClientGetPropertiesResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientGetPropertiesResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return BlobClientGetPropertiesResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientGetPropertiesResponse{}, err
 	}
-	return client.getPropertiesHandleResponse(resp)
+	resp, err := client.getPropertiesHandleResponse(httpResp)
+	return resp, err
 }
 
 // getPropertiesCreateRequest creates the GetProperties request.
@@ -1308,12 +1340,100 @@ func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, option
 // getPropertiesHandleResponse handles the GetProperties response.
 func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (BlobClientGetPropertiesResponse, error) {
 	result := BlobClientGetPropertiesResponse{}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
+	if val := resp.Header.Get("Accept-Ranges"); val != "" {
+		result.AcceptRanges = &val
+	}
+	if val := resp.Header.Get("x-ms-access-tier"); val != "" {
+		result.AccessTier = &val
+	}
+	if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" {
+		accessTierChangeTime, err := time.Parse(time.RFC1123, val)
 		if err != nil {
 			return BlobClientGetPropertiesResponse{}, err
 		}
-		result.LastModified = &lastModified
+		result.AccessTierChangeTime = &accessTierChangeTime
+	}
+	if val := resp.Header.Get("x-ms-access-tier-inferred"); val != "" {
+		accessTierInferred, err := strconv.ParseBool(val)
+		if err != nil {
+			return BlobClientGetPropertiesResponse{}, err
+		}
+		result.AccessTierInferred = &accessTierInferred
+	}
+	if val := resp.Header.Get("x-ms-archive-status"); val != "" {
+		result.ArchiveStatus = &val
+	}
+	if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" {
+		blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32)
+		blobCommittedBlockCount := int32(blobCommittedBlockCount32)
+		if err != nil {
+			return BlobClientGetPropertiesResponse{}, err
+		}
+		result.BlobCommittedBlockCount = &blobCommittedBlockCount
+	}
+	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
+		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
+		if err != nil {
+			return BlobClientGetPropertiesResponse{}, err
+		}
+		result.BlobSequenceNumber = &blobSequenceNumber
+	}
+	if val := resp.Header.Get("x-ms-blob-type"); val != "" {
+		result.BlobType = (*BlobType)(&val)
+	}
+	if val := resp.Header.Get("Cache-Control"); val != "" {
+		result.CacheControl = &val
+	}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("Content-Disposition"); val != "" {
+		result.ContentDisposition = &val
+	}
+	if val := resp.Header.Get("Content-Encoding"); val != "" {
+		result.ContentEncoding = &val
+	}
+	if val := resp.Header.Get("Content-Language"); val != "" {
+		result.ContentLanguage = &val
+	}
+	if val := resp.Header.Get("Content-Length"); val != "" {
+		contentLength, err := strconv.ParseInt(val, 10, 64)
+		if err != nil {
+			return BlobClientGetPropertiesResponse{}, err
+		}
+		result.ContentLength = &contentLength
+	}
+	if val := resp.Header.Get("Content-MD5"); val != "" {
+		contentMD5, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return BlobClientGetPropertiesResponse{}, err
+		}
+		result.ContentMD5 = contentMD5
+	}
+	if val := resp.Header.Get("Content-Type"); val != "" {
+		result.ContentType = &val
+	}
+	if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" {
+		copyCompletionTime, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientGetPropertiesResponse{}, err
+		}
+		result.CopyCompletionTime = &copyCompletionTime
+	}
+	if val := resp.Header.Get("x-ms-copy-id"); val != "" {
+		result.CopyID = &val
+	}
+	if val := resp.Header.Get("x-ms-copy-progress"); val != "" {
+		result.CopyProgress = &val
+	}
+	if val := resp.Header.Get("x-ms-copy-source"); val != "" {
+		result.CopySource = &val
+	}
+	if val := resp.Header.Get("x-ms-copy-status"); val != "" {
+		result.CopyStatus = (*CopyStatusType)(&val)
+	}
+	if val := resp.Header.Get("x-ms-copy-status-description"); val != "" {
+		result.CopyStatusDescription = &val
 	}
 	if val := resp.Header.Get("x-ms-creation-time"); val != "" {
 		creationTime, err := time.Parse(time.RFC1123, val)
@@ -1322,6 +1442,100 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob
 		}
 		result.CreationTime = &creationTime
 	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientGetPropertiesResponse{}, err
+		}
+		result.Date = &date
+	}
+	if val := resp.Header.Get("x-ms-copy-destination-snapshot"); val != "" {
+		result.DestinationSnapshot = &val
+	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+		result.EncryptionKeySHA256 = &val
+	}
+	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+		result.EncryptionScope = &val
+	}
+	if val := resp.Header.Get("x-ms-expiry-time"); val != "" {
+		expiresOn, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientGetPropertiesResponse{}, err
+		}
+		result.ExpiresOn = &expiresOn
+	}
+	if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" {
+		immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientGetPropertiesResponse{}, err
+		}
+		result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn
+	}
+	if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" {
+		result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val)
+	}
+	if val := resp.Header.Get("x-ms-is-current-version"); val != "" {
+		isCurrentVersion, err := strconv.ParseBool(val)
+		if err != nil {
+			return BlobClientGetPropertiesResponse{}, err
+		}
+		result.IsCurrentVersion = &isCurrentVersion
+	}
+	if val := resp.Header.Get("x-ms-incremental-copy"); val != "" {
+		isIncrementalCopy, err := strconv.ParseBool(val)
+		if err != nil {
+			return BlobClientGetPropertiesResponse{}, err
+		}
+		result.IsIncrementalCopy = &isIncrementalCopy
+	}
+	if val := resp.Header.Get("x-ms-blob-sealed"); val != "" {
+		isSealed, err := strconv.ParseBool(val)
+		if err != nil {
+			return BlobClientGetPropertiesResponse{}, err
+		}
+		result.IsSealed = &isSealed
+	}
+	if val := resp.Header.Get("x-ms-server-encrypted"); val != "" {
+		isServerEncrypted, err := strconv.ParseBool(val)
+		if err != nil {
+			return BlobClientGetPropertiesResponse{}, err
+		}
+		result.IsServerEncrypted = &isServerEncrypted
+	}
+	if val := resp.Header.Get("x-ms-last-access-time"); val != "" {
+		lastAccessed, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientGetPropertiesResponse{}, err
+		}
+		result.LastAccessed = &lastAccessed
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientGetPropertiesResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-lease-duration"); val != "" {
+		result.LeaseDuration = (*LeaseDurationType)(&val)
+	}
+	if val := resp.Header.Get("x-ms-lease-state"); val != "" {
+		result.LeaseState = (*LeaseStateType)(&val)
+	}
+	if val := resp.Header.Get("x-ms-lease-status"); val != "" {
+		result.LeaseStatus = (*LeaseStatusType)(&val)
+	}
+	if val := resp.Header.Get("x-ms-legal-hold"); val != "" {
+		legalHold, err := strconv.ParseBool(val)
+		if err != nil {
+			return BlobClientGetPropertiesResponse{}, err
+		}
+		result.LegalHold = &legalHold
+	}
 	for hh := range resp.Header {
 		if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") {
 			if result.Metadata == nil {
@@ -1341,159 +1555,12 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob
 			result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh))
 		}
 	}
-	if val := resp.Header.Get("x-ms-blob-type"); val != "" {
-		result.BlobType = (*BlobType)(&val)
-	}
-	if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" {
-		copyCompletionTime, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlobClientGetPropertiesResponse{}, err
-		}
-		result.CopyCompletionTime = &copyCompletionTime
-	}
-	if val := resp.Header.Get("x-ms-copy-status-description"); val != "" {
-		result.CopyStatusDescription = &val
-	}
-	if val := resp.Header.Get("x-ms-copy-id"); val != "" {
-		result.CopyID = &val
-	}
-	if val := resp.Header.Get("x-ms-copy-progress"); val != "" {
-		result.CopyProgress = &val
-	}
-	if val := resp.Header.Get("x-ms-copy-source"); val != "" {
-		result.CopySource = &val
-	}
-	if val := resp.Header.Get("x-ms-copy-status"); val != "" {
-		result.CopyStatus = (*CopyStatusType)(&val)
-	}
-	if val := resp.Header.Get("x-ms-incremental-copy"); val != "" {
-		isIncrementalCopy, err := strconv.ParseBool(val)
-		if err != nil {
-			return BlobClientGetPropertiesResponse{}, err
-		}
-		result.IsIncrementalCopy = &isIncrementalCopy
-	}
-	if val := resp.Header.Get("x-ms-copy-destination-snapshot"); val != "" {
-		result.DestinationSnapshot = &val
-	}
-	if val := resp.Header.Get("x-ms-lease-duration"); val != "" {
-		result.LeaseDuration = (*LeaseDurationType)(&val)
-	}
-	if val := resp.Header.Get("x-ms-lease-state"); val != "" {
-		result.LeaseState = (*LeaseStateType)(&val)
-	}
-	if val := resp.Header.Get("x-ms-lease-status"); val != "" {
-		result.LeaseStatus = (*LeaseStatusType)(&val)
-	}
-	if val := resp.Header.Get("Content-Length"); val != "" {
-		contentLength, err := strconv.ParseInt(val, 10, 64)
-		if err != nil {
-			return BlobClientGetPropertiesResponse{}, err
-		}
-		result.ContentLength = &contentLength
-	}
-	if val := resp.Header.Get("Content-Type"); val != "" {
-		result.ContentType = &val
-	}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
-	}
-	if val := resp.Header.Get("Content-MD5"); val != "" {
-		contentMD5, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return BlobClientGetPropertiesResponse{}, err
-		}
-		result.ContentMD5 = contentMD5
-	}
-	if val := resp.Header.Get("Content-Encoding"); val != "" {
-		result.ContentEncoding = &val
-	}
-	if val := resp.Header.Get("Content-Disposition"); val != "" {
-		result.ContentDisposition = &val
-	}
-	if val := resp.Header.Get("Content-Language"); val != "" {
-		result.ContentLanguage = &val
-	}
-	if val := resp.Header.Get("Cache-Control"); val != "" {
-		result.CacheControl = &val
-	}
-	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
-		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
-		if err != nil {
-			return BlobClientGetPropertiesResponse{}, err
-		}
-		result.BlobSequenceNumber = &blobSequenceNumber
-	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
+	if val := resp.Header.Get("x-ms-rehydrate-priority"); val != "" {
+		result.RehydratePriority = &val
 	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlobClientGetPropertiesResponse{}, err
-		}
-		result.Date = &date
-	}
-	if val := resp.Header.Get("Accept-Ranges"); val != "" {
-		result.AcceptRanges = &val
-	}
-	if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" {
-		blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32)
-		blobCommittedBlockCount := int32(blobCommittedBlockCount32)
-		if err != nil {
-			return BlobClientGetPropertiesResponse{}, err
-		}
-		result.BlobCommittedBlockCount = &blobCommittedBlockCount
-	}
-	if val := resp.Header.Get("x-ms-server-encrypted"); val != "" {
-		isServerEncrypted, err := strconv.ParseBool(val)
-		if err != nil {
-			return BlobClientGetPropertiesResponse{}, err
-		}
-		result.IsServerEncrypted = &isServerEncrypted
-	}
-	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
-		result.EncryptionKeySHA256 = &val
-	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
-	}
-	if val := resp.Header.Get("x-ms-access-tier"); val != "" {
-		result.AccessTier = &val
-	}
-	if val := resp.Header.Get("x-ms-access-tier-inferred"); val != "" {
-		accessTierInferred, err := strconv.ParseBool(val)
-		if err != nil {
-			return BlobClientGetPropertiesResponse{}, err
-		}
-		result.AccessTierInferred = &accessTierInferred
-	}
-	if val := resp.Header.Get("x-ms-archive-status"); val != "" {
-		result.ArchiveStatus = &val
-	}
-	if val := resp.Header.Get("x-ms-access-tier-change-time"); val != "" {
-		accessTierChangeTime, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlobClientGetPropertiesResponse{}, err
-		}
-		result.AccessTierChangeTime = &accessTierChangeTime
-	}
-	if val := resp.Header.Get("x-ms-version-id"); val != "" {
-		result.VersionID = &val
-	}
-	if val := resp.Header.Get("x-ms-is-current-version"); val != "" {
-		isCurrentVersion, err := strconv.ParseBool(val)
-		if err != nil {
-			return BlobClientGetPropertiesResponse{}, err
-		}
-		result.IsCurrentVersion = &isCurrentVersion
-	}
 	if val := resp.Header.Get("x-ms-tag-count"); val != "" {
 		tagCount, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
@@ -1501,46 +1568,11 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob
 		}
 		result.TagCount = &tagCount
 	}
-	if val := resp.Header.Get("x-ms-expiry-time"); val != "" {
-		expiresOn, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlobClientGetPropertiesResponse{}, err
-		}
-		result.ExpiresOn = &expiresOn
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
 	}
-	if val := resp.Header.Get("x-ms-blob-sealed"); val != "" {
-		isSealed, err := strconv.ParseBool(val)
-		if err != nil {
-			return BlobClientGetPropertiesResponse{}, err
-		}
-		result.IsSealed = &isSealed
-	}
-	if val := resp.Header.Get("x-ms-rehydrate-priority"); val != "" {
-		result.RehydratePriority = &val
-	}
-	if val := resp.Header.Get("x-ms-last-access-time"); val != "" {
-		lastAccessed, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlobClientGetPropertiesResponse{}, err
-		}
-		result.LastAccessed = &lastAccessed
-	}
-	if val := resp.Header.Get("x-ms-immutability-policy-until-date"); val != "" {
-		immutabilityPolicyExpiresOn, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlobClientGetPropertiesResponse{}, err
-		}
-		result.ImmutabilityPolicyExpiresOn = &immutabilityPolicyExpiresOn
-	}
-	if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" {
-		result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val)
-	}
-	if val := resp.Header.Get("x-ms-legal-hold"); val != "" {
-		legalHold, err := strconv.ParseBool(val)
-		if err != nil {
-			return BlobClientGetPropertiesResponse{}, err
-		}
-		result.LegalHold = &legalHold
+	if val := resp.Header.Get("x-ms-version-id"); val != "" {
+		result.VersionID = &val
 	}
 	return result, nil
 }
@@ -1553,18 +1585,21 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 //   - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
 func (client *BlobClient) GetTags(ctx context.Context, options *BlobClientGetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientGetTagsResponse, error) {
+	var err error
 	req, err := client.getTagsCreateRequest(ctx, options, modifiedAccessConditions, leaseAccessConditions)
 	if err != nil {
 		return BlobClientGetTagsResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientGetTagsResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return BlobClientGetTagsResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientGetTagsResponse{}, err
 	}
-	return client.getTagsHandleResponse(resp)
+	resp, err := client.getTagsHandleResponse(httpResp)
+	return resp, err
 }
 
 // getTagsCreateRequest creates the GetTags request.
@@ -1605,12 +1640,6 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -1618,6 +1647,12 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	if err := runtime.UnmarshalAsXML(resp, &result.BlobTags); err != nil {
 		return BlobClientGetTagsResponse{}, err
 	}
@@ -1633,18 +1668,21 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient
 //   - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *BlobClient) Query(ctx context.Context, options *BlobClientQueryOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientQueryResponse, error) {
+	var err error
 	req, err := client.queryCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, modifiedAccessConditions)
 	if err != nil {
 		return BlobClientQueryResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientQueryResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusPartialContent) {
-		return BlobClientQueryResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusPartialContent) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientQueryResponse{}, err
 	}
-	return client.queryHandleResponse(resp)
+	resp, err := client.queryHandleResponse(httpResp)
+	return resp, err
 }
 
 // queryCreateRequest creates the Query request.
@@ -1707,55 +1745,23 @@ func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobC
 // queryHandleResponse handles the Query response.
 func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQueryResponse, error) {
 	result := BlobClientQueryResponse{Body: resp.Body}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
+	if val := resp.Header.Get("Accept-Ranges"); val != "" {
+		result.AcceptRanges = &val
+	}
+	if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" {
+		blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32)
+		blobCommittedBlockCount := int32(blobCommittedBlockCount32)
 		if err != nil {
 			return BlobClientQueryResponse{}, err
 		}
-		result.LastModified = &lastModified
+		result.BlobCommittedBlockCount = &blobCommittedBlockCount
 	}
-	for hh := range resp.Header {
-		if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") {
-			if result.Metadata == nil {
-				result.Metadata = map[string]*string{}
-			}
-			result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh))
-		}
-	}
-	if val := resp.Header.Get("Content-Length"); val != "" {
-		contentLength, err := strconv.ParseInt(val, 10, 64)
+	if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" {
+		blobContentMD5, err := base64.StdEncoding.DecodeString(val)
 		if err != nil {
 			return BlobClientQueryResponse{}, err
 		}
-		result.ContentLength = &contentLength
-	}
-	if val := resp.Header.Get("Content-Type"); val != "" {
-		result.ContentType = &val
-	}
-	if val := resp.Header.Get("Content-Range"); val != "" {
-		result.ContentRange = &val
-	}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
-	}
-	if val := resp.Header.Get("Content-MD5"); val != "" {
-		contentMD5, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return BlobClientQueryResponse{}, err
-		}
-		result.ContentMD5 = contentMD5
-	}
-	if val := resp.Header.Get("Content-Encoding"); val != "" {
-		result.ContentEncoding = &val
-	}
-	if val := resp.Header.Get("Cache-Control"); val != "" {
-		result.CacheControl = &val
-	}
-	if val := resp.Header.Get("Content-Disposition"); val != "" {
-		result.ContentDisposition = &val
-	}
-	if val := resp.Header.Get("Content-Language"); val != "" {
-		result.ContentLanguage = &val
+		result.BlobContentMD5 = blobContentMD5
 	}
 	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
 		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
@@ -1767,6 +1773,48 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu
 	if val := resp.Header.Get("x-ms-blob-type"); val != "" {
 		result.BlobType = (*BlobType)(&val)
 	}
+	if val := resp.Header.Get("Cache-Control"); val != "" {
+		result.CacheControl = &val
+	}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
+		contentCRC64, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return BlobClientQueryResponse{}, err
+		}
+		result.ContentCRC64 = contentCRC64
+	}
+	if val := resp.Header.Get("Content-Disposition"); val != "" {
+		result.ContentDisposition = &val
+	}
+	if val := resp.Header.Get("Content-Encoding"); val != "" {
+		result.ContentEncoding = &val
+	}
+	if val := resp.Header.Get("Content-Language"); val != "" {
+		result.ContentLanguage = &val
+	}
+	if val := resp.Header.Get("Content-Length"); val != "" {
+		contentLength, err := strconv.ParseInt(val, 10, 64)
+		if err != nil {
+			return BlobClientQueryResponse{}, err
+		}
+		result.ContentLength = &contentLength
+	}
+	if val := resp.Header.Get("Content-MD5"); val != "" {
+		contentMD5, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return BlobClientQueryResponse{}, err
+		}
+		result.ContentMD5 = contentMD5
+	}
+	if val := resp.Header.Get("Content-Range"); val != "" {
+		result.ContentRange = &val
+	}
+	if val := resp.Header.Get("Content-Type"); val != "" {
+		result.ContentType = &val
+	}
 	if val := resp.Header.Get("x-ms-copy-completion-time"); val != "" {
 		copyCompletionTime, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -1774,9 +1822,6 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu
 		}
 		result.CopyCompletionTime = &copyCompletionTime
 	}
-	if val := resp.Header.Get("x-ms-copy-status-description"); val != "" {
-		result.CopyStatusDescription = &val
-	}
 	if val := resp.Header.Get("x-ms-copy-id"); val != "" {
 		result.CopyID = &val
 	}
@@ -1789,6 +1834,39 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu
 	if val := resp.Header.Get("x-ms-copy-status"); val != "" {
 		result.CopyStatus = (*CopyStatusType)(&val)
 	}
+	if val := resp.Header.Get("x-ms-copy-status-description"); val != "" {
+		result.CopyStatusDescription = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientQueryResponse{}, err
+		}
+		result.Date = &date
+	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+		result.EncryptionKeySHA256 = &val
+	}
+	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+		result.EncryptionScope = &val
+	}
+	if val := resp.Header.Get("x-ms-server-encrypted"); val != "" {
+		isServerEncrypted, err := strconv.ParseBool(val)
+		if err != nil {
+			return BlobClientQueryResponse{}, err
+		}
+		result.IsServerEncrypted = &isServerEncrypted
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientQueryResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
 	if val := resp.Header.Get("x-ms-lease-duration"); val != "" {
 		result.LeaseDuration = (*LeaseDurationType)(&val)
 	}
@@ -1798,8 +1876,13 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu
 	if val := resp.Header.Get("x-ms-lease-status"); val != "" {
 		result.LeaseStatus = (*LeaseStatusType)(&val)
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
+	for hh := range resp.Header {
+		if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") {
+			if result.Metadata == nil {
+				result.Metadata = map[string]*string{}
+			}
+			result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh))
+		}
 	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
@@ -1807,51 +1890,6 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu
 	if val := resp.Header.Get("x-ms-version"); val != "" {
 		result.Version = &val
 	}
-	if val := resp.Header.Get("Accept-Ranges"); val != "" {
-		result.AcceptRanges = &val
-	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlobClientQueryResponse{}, err
-		}
-		result.Date = &date
-	}
-	if val := resp.Header.Get("x-ms-blob-committed-block-count"); val != "" {
-		blobCommittedBlockCount32, err := strconv.ParseInt(val, 10, 32)
-		blobCommittedBlockCount := int32(blobCommittedBlockCount32)
-		if err != nil {
-			return BlobClientQueryResponse{}, err
-		}
-		result.BlobCommittedBlockCount = &blobCommittedBlockCount
-	}
-	if val := resp.Header.Get("x-ms-server-encrypted"); val != "" {
-		isServerEncrypted, err := strconv.ParseBool(val)
-		if err != nil {
-			return BlobClientQueryResponse{}, err
-		}
-		result.IsServerEncrypted = &isServerEncrypted
-	}
-	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
-		result.EncryptionKeySHA256 = &val
-	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
-	}
-	if val := resp.Header.Get("x-ms-blob-content-md5"); val != "" {
-		blobContentMD5, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return BlobClientQueryResponse{}, err
-		}
-		result.BlobContentMD5 = blobContentMD5
-	}
-	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
-		contentCRC64, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return BlobClientQueryResponse{}, err
-		}
-		result.ContentCRC64 = contentCRC64
-	}
 	return result, nil
 }
 
@@ -1863,18 +1901,21 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu
 //   - options - BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *BlobClient) ReleaseLease(ctx context.Context, leaseID string, options *BlobClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientReleaseLeaseResponse, error) {
+	var err error
 	req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions)
 	if err != nil {
 		return BlobClientReleaseLeaseResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientReleaseLeaseResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return BlobClientReleaseLeaseResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientReleaseLeaseResponse{}, err
 	}
-	return client.releaseLeaseHandleResponse(resp)
+	resp, err := client.releaseLeaseHandleResponse(httpResp)
+	return resp, err
 }
 
 // releaseLeaseCreateRequest creates the ReleaseLease request.
@@ -1917,6 +1958,16 @@ func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID
 // releaseLeaseHandleResponse handles the ReleaseLease response.
 func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobClientReleaseLeaseResponse, error) {
 	result := BlobClientReleaseLeaseResponse{}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientReleaseLeaseResponse{}, err
+		}
+		result.Date = &date
+	}
 	if val := resp.Header.Get("ETag"); val != "" {
 		result.ETag = (*azcore.ETag)(&val)
 	}
@@ -1927,22 +1978,12 @@ func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobC
 		}
 		result.LastModified = &lastModified
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
 	}
 	if val := resp.Header.Get("x-ms-version"); val != "" {
 		result.Version = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlobClientReleaseLeaseResponse{}, err
-		}
-		result.Date = &date
-	}
 	return result, nil
 }
 
@@ -1954,18 +1995,21 @@ func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobC
 //   - options - BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *BlobClient) RenewLease(ctx context.Context, leaseID string, options *BlobClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientRenewLeaseResponse, error) {
+	var err error
 	req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions)
 	if err != nil {
 		return BlobClientRenewLeaseResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientRenewLeaseResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return BlobClientRenewLeaseResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientRenewLeaseResponse{}, err
 	}
-	return client.renewLeaseHandleResponse(resp)
+	resp, err := client.renewLeaseHandleResponse(httpResp)
+	return resp, err
 }
 
 // renewLeaseCreateRequest creates the RenewLease request.
@@ -2008,6 +2052,16 @@ func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID s
 // renewLeaseHandleResponse handles the RenewLease response.
 func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobClientRenewLeaseResponse, error) {
 	result := BlobClientRenewLeaseResponse{}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientRenewLeaseResponse{}, err
+		}
+		result.Date = &date
+	}
 	if val := resp.Header.Get("ETag"); val != "" {
 		result.ETag = (*azcore.ETag)(&val)
 	}
@@ -2021,22 +2075,12 @@ func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobCli
 	if val := resp.Header.Get("x-ms-lease-id"); val != "" {
 		result.LeaseID = &val
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
 	}
 	if val := resp.Header.Get("x-ms-version"); val != "" {
 		result.Version = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlobClientRenewLeaseResponse{}, err
-		}
-		result.Date = &date
-	}
 	return result, nil
 }
 
@@ -2047,18 +2091,21 @@ func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobCli
 //   - expiryOptions - Required. Indicates mode of the expiry time
 //   - options - BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method.
 func (client *BlobClient) SetExpiry(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (BlobClientSetExpiryResponse, error) {
+	var err error
 	req, err := client.setExpiryCreateRequest(ctx, expiryOptions, options)
 	if err != nil {
 		return BlobClientSetExpiryResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientSetExpiryResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return BlobClientSetExpiryResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientSetExpiryResponse{}, err
 	}
-	return client.setExpiryHandleResponse(resp)
+	resp, err := client.setExpiryHandleResponse(httpResp)
+	return resp, err
 }
 
 // setExpiryCreateRequest creates the SetExpiry request.
@@ -2088,6 +2135,16 @@ func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOpti
 // setExpiryHandleResponse handles the SetExpiry response.
 func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClientSetExpiryResponse, error) {
 	result := BlobClientSetExpiryResponse{}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientSetExpiryResponse{}, err
+		}
+		result.Date = &date
+	}
 	if val := resp.Header.Get("ETag"); val != "" {
 		result.ETag = (*azcore.ETag)(&val)
 	}
@@ -2098,22 +2155,12 @@ func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClie
 		}
 		result.LastModified = &lastModified
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
 	}
 	if val := resp.Header.Get("x-ms-version"); val != "" {
 		result.Version = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlobClientSetExpiryResponse{}, err
-		}
-		result.Date = &date
-	}
 	return result, nil
 }
 
@@ -2126,18 +2173,21 @@ func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClie
 //   - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *BlobClient) SetHTTPHeaders(ctx context.Context, options *BlobClientSetHTTPHeadersOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetHTTPHeadersResponse, error) {
+	var err error
 	req, err := client.setHTTPHeadersCreateRequest(ctx, options, blobHTTPHeaders, leaseAccessConditions, modifiedAccessConditions)
 	if err != nil {
 		return BlobClientSetHTTPHeadersResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientSetHTTPHeadersResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return BlobClientSetHTTPHeadersResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientSetHTTPHeadersResponse{}, err
 	}
-	return client.setHTTPHeadersHandleResponse(resp)
+	resp, err := client.setHTTPHeadersHandleResponse(httpResp)
+	return resp, err
 }
 
 // setHTTPHeadersCreateRequest creates the SetHTTPHeaders request.
@@ -2199,16 +2249,6 @@ func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, optio
 // setHTTPHeadersHandleResponse handles the SetHTTPHeaders response.
 func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (BlobClientSetHTTPHeadersResponse, error) {
 	result := BlobClientSetHTTPHeadersResponse{}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
-	}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlobClientSetHTTPHeadersResponse{}, err
-		}
-		result.LastModified = &lastModified
-	}
 	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
 		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
@@ -2219,12 +2259,6 @@ func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (Blo
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -2232,6 +2266,22 @@ func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (Blo
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientSetHTTPHeadersResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	return result, nil
 }
 
@@ -2243,18 +2293,21 @@ func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (Blo
 //     method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *BlobClient) SetImmutabilityPolicy(ctx context.Context, options *BlobClientSetImmutabilityPolicyOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetImmutabilityPolicyResponse, error) {
+	var err error
 	req, err := client.setImmutabilityPolicyCreateRequest(ctx, options, modifiedAccessConditions)
 	if err != nil {
 		return BlobClientSetImmutabilityPolicyResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientSetImmutabilityPolicyResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return BlobClientSetImmutabilityPolicyResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientSetImmutabilityPolicyResponse{}, err
 	}
-	return client.setImmutabilityPolicyHandleResponse(resp)
+	resp, err := client.setImmutabilityPolicyHandleResponse(httpResp)
+	return resp, err
 }
 
 // setImmutabilityPolicyCreateRequest creates the SetImmutabilityPolicy request.
@@ -2292,12 +2345,6 @@ func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Respons
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -2315,6 +2362,12 @@ func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Respons
 	if val := resp.Header.Get("x-ms-immutability-policy-mode"); val != "" {
 		result.ImmutabilityPolicyMode = (*ImmutabilityPolicyMode)(&val)
 	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	return result, nil
 }
 
@@ -2325,18 +2378,21 @@ func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Respons
 //   - legalHold - Specified if a legal hold should be set on the blob.
 //   - options - BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method.
 func (client *BlobClient) SetLegalHold(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (BlobClientSetLegalHoldResponse, error) {
+	var err error
 	req, err := client.setLegalHoldCreateRequest(ctx, legalHold, options)
 	if err != nil {
 		return BlobClientSetLegalHoldResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientSetLegalHoldResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return BlobClientSetLegalHoldResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientSetLegalHoldResponse{}, err
 	}
-	return client.setLegalHoldHandleResponse(resp)
+	resp, err := client.setLegalHoldHandleResponse(httpResp)
+	return resp, err
 }
 
 // setLegalHoldCreateRequest creates the SetLegalHold request.
@@ -2366,12 +2422,6 @@ func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobC
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -2386,6 +2436,12 @@ func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobC
 		}
 		result.LegalHold = &legalHold
 	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	return result, nil
 }
 
@@ -2400,18 +2456,21 @@ func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobC
 //   - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *BlobClient) SetMetadata(ctx context.Context, options *BlobClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetMetadataResponse, error) {
+	var err error
 	req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
 	if err != nil {
 		return BlobClientSetMetadataResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientSetMetadataResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return BlobClientSetMetadataResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientSetMetadataResponse{}, err
 	}
-	return client.setMetadataHandleResponse(resp)
+	resp, err := client.setMetadataHandleResponse(httpResp)
+	return resp, err
 }
 
 // setMetadataCreateRequest creates the SetMetadata request.
@@ -2474,9 +2533,32 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options
 // setMetadataHandleResponse handles the SetMetadata response.
 func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobClientSetMetadataResponse, error) {
 	result := BlobClientSetMetadataResponse{}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientSetMetadataResponse{}, err
+		}
+		result.Date = &date
+	}
 	if val := resp.Header.Get("ETag"); val != "" {
 		result.ETag = (*azcore.ETag)(&val)
 	}
+	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+		result.EncryptionKeySHA256 = &val
+	}
+	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+		result.EncryptionScope = &val
+	}
+	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
+		isServerEncrypted, err := strconv.ParseBool(val)
+		if err != nil {
+			return BlobClientSetMetadataResponse{}, err
+		}
+		result.IsServerEncrypted = &isServerEncrypted
+	}
 	if val := resp.Header.Get("Last-Modified"); val != "" {
 		lastModified, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -2484,9 +2566,6 @@ func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobCl
 		}
 		result.LastModified = &lastModified
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
 	}
@@ -2496,26 +2575,6 @@ func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobCl
 	if val := resp.Header.Get("x-ms-version-id"); val != "" {
 		result.VersionID = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlobClientSetMetadataResponse{}, err
-		}
-		result.Date = &date
-	}
-	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
-		isServerEncrypted, err := strconv.ParseBool(val)
-		if err != nil {
-			return BlobClientSetMetadataResponse{}, err
-		}
-		result.IsServerEncrypted = &isServerEncrypted
-	}
-	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
-		result.EncryptionKeySHA256 = &val
-	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
-	}
 	return result, nil
 }
 
@@ -2528,18 +2587,21 @@ func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobCl
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 //   - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
 func (client *BlobClient) SetTags(ctx context.Context, tags BlobTags, options *BlobClientSetTagsOptions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientSetTagsResponse, error) {
+	var err error
 	req, err := client.setTagsCreateRequest(ctx, tags, options, modifiedAccessConditions, leaseAccessConditions)
 	if err != nil {
 		return BlobClientSetTagsResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientSetTagsResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusNoContent) {
-		return BlobClientSetTagsResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusNoContent) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientSetTagsResponse{}, err
 	}
-	return client.setTagsHandleResponse(resp)
+	resp, err := client.setTagsHandleResponse(httpResp)
+	return resp, err
 }
 
 // setTagsCreateRequest creates the SetTags request.
@@ -2586,12 +2648,6 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -2599,6 +2655,12 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	return result, nil
 }
 
@@ -2614,18 +2676,21 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient
 //   - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *BlobClient) SetTier(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientSetTierResponse, error) {
+	var err error
 	req, err := client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions)
 	if err != nil {
 		return BlobClientSetTierResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientSetTierResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) {
-		return BlobClientSetTierResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK, http.StatusAccepted) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientSetTierResponse{}, err
 	}
-	return client.setTierHandleResponse(resp)
+	resp, err := client.setTierHandleResponse(httpResp)
+	return resp, err
 }
 
 // setTierCreateRequest creates the SetTier request.
@@ -2692,18 +2757,21 @@ func (client *BlobClient) setTierHandleResponse(resp *http.Response) (BlobClient
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 //   - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
 func (client *BlobClient) StartCopyFromURL(ctx context.Context, copySource string, options *BlobClientStartCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientStartCopyFromURLResponse, error) {
+	var err error
 	req, err := client.startCopyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions)
 	if err != nil {
 		return BlobClientStartCopyFromURLResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientStartCopyFromURLResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusAccepted) {
-		return BlobClientStartCopyFromURLResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusAccepted) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientStartCopyFromURLResponse{}, err
 	}
-	return client.startCopyFromURLHandleResponse(resp)
+	resp, err := client.startCopyFromURLHandleResponse(httpResp)
+	return resp, err
 }
 
 // startCopyFromURLCreateRequest creates the StartCopyFromURL request.
@@ -2790,6 +2858,22 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop
 // startCopyFromURLHandleResponse handles the StartCopyFromURL response.
 func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (BlobClientStartCopyFromURLResponse, error) {
 	result := BlobClientStartCopyFromURLResponse{}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-copy-id"); val != "" {
+		result.CopyID = &val
+	}
+	if val := resp.Header.Get("x-ms-copy-status"); val != "" {
+		result.CopyStatus = (*CopyStatusType)(&val)
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlobClientStartCopyFromURLResponse{}, err
+		}
+		result.Date = &date
+	}
 	if val := resp.Header.Get("ETag"); val != "" {
 		result.ETag = (*azcore.ETag)(&val)
 	}
@@ -2800,9 +2884,6 @@ func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (B
 		}
 		result.LastModified = &lastModified
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
 	}
@@ -2812,19 +2893,6 @@ func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (B
 	if val := resp.Header.Get("x-ms-version-id"); val != "" {
 		result.VersionID = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlobClientStartCopyFromURLResponse{}, err
-		}
-		result.Date = &date
-	}
-	if val := resp.Header.Get("x-ms-copy-id"); val != "" {
-		result.CopyID = &val
-	}
-	if val := resp.Header.Get("x-ms-copy-status"); val != "" {
-		result.CopyStatus = (*CopyStatusType)(&val)
-	}
 	return result, nil
 }
 
@@ -2834,18 +2902,21 @@ func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (B
 // Generated from API version 2023-08-03
 //   - options - BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method.
 func (client *BlobClient) Undelete(ctx context.Context, options *BlobClientUndeleteOptions) (BlobClientUndeleteResponse, error) {
+	var err error
 	req, err := client.undeleteCreateRequest(ctx, options)
 	if err != nil {
 		return BlobClientUndeleteResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlobClientUndeleteResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return BlobClientUndeleteResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return BlobClientUndeleteResponse{}, err
 	}
-	return client.undeleteHandleResponse(resp)
+	resp, err := client.undeleteHandleResponse(httpResp)
+	return resp, err
 }
 
 // undeleteCreateRequest creates the Undelete request.
@@ -2874,12 +2945,6 @@ func (client *BlobClient) undeleteHandleResponse(resp *http.Response) (BlobClien
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -2887,5 +2952,11 @@ func (client *BlobClient) undeleteHandleResponse(resp *http.Response) (BlobClien
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	return result, nil
 }
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go
index 6b2def53f8..bfd7f5eac7 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go
@@ -3,9 +3,8 @@
 
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License. See License.txt in the project root for license information.
-// Code generated by Microsoft (R) AutoRest Code Generator.
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
 // Changes may cause incorrect behavior and will be lost if the code is regenerated.
-// DO NOT EDIT.
 
 package generated
 
@@ -47,18 +46,21 @@ type BlockBlobClient struct {
 //   - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *BlockBlobClient) CommitBlockList(ctx context.Context, blocks BlockLookupList, options *BlockBlobClientCommitBlockListOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientCommitBlockListResponse, error) {
+	var err error
 	req, err := client.commitBlockListCreateRequest(ctx, blocks, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
 	if err != nil {
 		return BlockBlobClientCommitBlockListResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlockBlobClientCommitBlockListResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return BlockBlobClientCommitBlockListResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
+		err = runtime.NewResponseError(httpResp)
+		return BlockBlobClientCommitBlockListResponse{}, err
 	}
-	return client.commitBlockListHandleResponse(resp)
+	resp, err := client.commitBlockListHandleResponse(httpResp)
+	return resp, err
 }
 
 // commitBlockListCreateRequest creates the CommitBlockList request.
@@ -163,22 +165,8 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context,
 // commitBlockListHandleResponse handles the CommitBlockList response.
 func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response) (BlockBlobClientCommitBlockListResponse, error) {
 	result := BlockBlobClientCommitBlockListResponse{}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
-	}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlockBlobClientCommitBlockListResponse{}, err
-		}
-		result.LastModified = &lastModified
-	}
-	if val := resp.Header.Get("Content-MD5"); val != "" {
-		contentMD5, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return BlockBlobClientCommitBlockListResponse{}, err
-		}
-		result.ContentMD5 = contentMD5
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
 	}
 	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
 		contentCRC64, err := base64.StdEncoding.DecodeString(val)
@@ -187,8 +175,42 @@ func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response
 		}
 		result.ContentCRC64 = contentCRC64
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
+	if val := resp.Header.Get("Content-MD5"); val != "" {
+		contentMD5, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return BlockBlobClientCommitBlockListResponse{}, err
+		}
+		result.ContentMD5 = contentMD5
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlockBlobClientCommitBlockListResponse{}, err
+		}
+		result.Date = &date
+	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+		result.EncryptionKeySHA256 = &val
+	}
+	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+		result.EncryptionScope = &val
+	}
+	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
+		isServerEncrypted, err := strconv.ParseBool(val)
+		if err != nil {
+			return BlockBlobClientCommitBlockListResponse{}, err
+		}
+		result.IsServerEncrypted = &isServerEncrypted
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlockBlobClientCommitBlockListResponse{}, err
+		}
+		result.LastModified = &lastModified
 	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
@@ -199,26 +221,6 @@ func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response
 	if val := resp.Header.Get("x-ms-version-id"); val != "" {
 		result.VersionID = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlockBlobClientCommitBlockListResponse{}, err
-		}
-		result.Date = &date
-	}
-	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
-		isServerEncrypted, err := strconv.ParseBool(val)
-		if err != nil {
-			return BlockBlobClientCommitBlockListResponse{}, err
-		}
-		result.IsServerEncrypted = &isServerEncrypted
-	}
-	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
-		result.EncryptionKeySHA256 = &val
-	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
-	}
 	return result, nil
 }
 
@@ -231,18 +233,21 @@ func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response
 //   - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockListType, options *BlockBlobClientGetBlockListOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientGetBlockListResponse, error) {
+	var err error
 	req, err := client.getBlockListCreateRequest(ctx, listType, options, leaseAccessConditions, modifiedAccessConditions)
 	if err != nil {
 		return BlockBlobClientGetBlockListResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlockBlobClientGetBlockListResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return BlockBlobClientGetBlockListResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return BlockBlobClientGetBlockListResponse{}, err
 	}
-	return client.getBlockListHandleResponse(resp)
+	resp, err := client.getBlockListHandleResponse(httpResp)
+	return resp, err
 }
 
 // getBlockListCreateRequest creates the GetBlockList request.
@@ -278,19 +283,6 @@ func (client *BlockBlobClient) getBlockListCreateRequest(ctx context.Context, li
 // getBlockListHandleResponse handles the GetBlockList response.
 func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) (BlockBlobClientGetBlockListResponse, error) {
 	result := BlockBlobClientGetBlockListResponse{}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlockBlobClientGetBlockListResponse{}, err
-		}
-		result.LastModified = &lastModified
-	}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
-	}
-	if val := resp.Header.Get("Content-Type"); val != "" {
-		result.ContentType = &val
-	}
 	if val := resp.Header.Get("x-ms-blob-content-length"); val != "" {
 		blobContentLength, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
@@ -301,11 +293,8 @@ func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) (
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
+	if val := resp.Header.Get("Content-Type"); val != "" {
+		result.ContentType = &val
 	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
@@ -314,6 +303,22 @@ func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) (
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlockBlobClientGetBlockListResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	if err := runtime.UnmarshalAsXML(resp, &result.BlockList); err != nil {
 		return BlockBlobClientGetBlockListResponse{}, err
 	}
@@ -342,18 +347,21 @@ func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) (
 //   - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL
 //     method.
 func (client *BlockBlobClient) PutBlobFromURL(ctx context.Context, contentLength int64, copySource string, options *BlockBlobClientPutBlobFromURLOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientPutBlobFromURLResponse, error) {
+	var err error
 	req, err := client.putBlobFromURLCreateRequest(ctx, contentLength, copySource, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions, sourceModifiedAccessConditions)
 	if err != nil {
 		return BlockBlobClientPutBlobFromURLResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlockBlobClientPutBlobFromURLResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return BlockBlobClientPutBlobFromURLResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
+		err = runtime.NewResponseError(httpResp)
+		return BlockBlobClientPutBlobFromURLResponse{}, err
 	}
-	return client.putBlobFromURLHandleResponse(resp)
+	resp, err := client.putBlobFromURLHandleResponse(httpResp)
+	return resp, err
 }
 
 // putBlobFromURLCreateRequest creates the PutBlobFromURL request.
@@ -472,15 +480,8 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context,
 // putBlobFromURLHandleResponse handles the PutBlobFromURL response.
 func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) (BlockBlobClientPutBlobFromURLResponse, error) {
 	result := BlockBlobClientPutBlobFromURLResponse{}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
-	}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlockBlobClientPutBlobFromURLResponse{}, err
-		}
-		result.LastModified = &lastModified
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
 	}
 	if val := resp.Header.Get("Content-MD5"); val != "" {
 		contentMD5, err := base64.StdEncoding.DecodeString(val)
@@ -489,8 +490,35 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response)
 		}
 		result.ContentMD5 = contentMD5
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlockBlobClientPutBlobFromURLResponse{}, err
+		}
+		result.Date = &date
+	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+		result.EncryptionKeySHA256 = &val
+	}
+	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+		result.EncryptionScope = &val
+	}
+	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
+		isServerEncrypted, err := strconv.ParseBool(val)
+		if err != nil {
+			return BlockBlobClientPutBlobFromURLResponse{}, err
+		}
+		result.IsServerEncrypted = &isServerEncrypted
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlockBlobClientPutBlobFromURLResponse{}, err
+		}
+		result.LastModified = &lastModified
 	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
@@ -501,26 +529,6 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response)
 	if val := resp.Header.Get("x-ms-version-id"); val != "" {
 		result.VersionID = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlockBlobClientPutBlobFromURLResponse{}, err
-		}
-		result.Date = &date
-	}
-	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
-		isServerEncrypted, err := strconv.ParseBool(val)
-		if err != nil {
-			return BlockBlobClientPutBlobFromURLResponse{}, err
-		}
-		result.IsServerEncrypted = &isServerEncrypted
-	}
-	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
-		result.EncryptionKeySHA256 = &val
-	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
-	}
 	return result, nil
 }
 
@@ -538,18 +546,21 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response)
 //   - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
 //   - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
 func (client *BlockBlobClient) StageBlock(ctx context.Context, blockID string, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientStageBlockOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo) (BlockBlobClientStageBlockResponse, error) {
+	var err error
 	req, err := client.stageBlockCreateRequest(ctx, blockID, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo)
 	if err != nil {
 		return BlockBlobClientStageBlockResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlockBlobClientStageBlockResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return BlockBlobClientStageBlockResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
+		err = runtime.NewResponseError(httpResp)
+		return BlockBlobClientStageBlockResponse{}, err
 	}
-	return client.stageBlockHandleResponse(resp)
+	resp, err := client.stageBlockHandleResponse(httpResp)
+	return resp, err
 }
 
 // stageBlockCreateRequest creates the StageBlock request.
@@ -601,29 +612,9 @@ func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, bloc
 // stageBlockHandleResponse handles the StageBlock response.
 func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (BlockBlobClientStageBlockResponse, error) {
 	result := BlockBlobClientStageBlockResponse{}
-	if val := resp.Header.Get("Content-MD5"); val != "" {
-		contentMD5, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return BlockBlobClientStageBlockResponse{}, err
-		}
-		result.ContentMD5 = contentMD5
-	}
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlockBlobClientStageBlockResponse{}, err
-		}
-		result.Date = &date
-	}
 	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
 		contentCRC64, err := base64.StdEncoding.DecodeString(val)
 		if err != nil {
@@ -631,6 +622,26 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl
 		}
 		result.ContentCRC64 = contentCRC64
 	}
+	if val := resp.Header.Get("Content-MD5"); val != "" {
+		contentMD5, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return BlockBlobClientStageBlockResponse{}, err
+		}
+		result.ContentMD5 = contentMD5
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlockBlobClientStageBlockResponse{}, err
+		}
+		result.Date = &date
+	}
+	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+		result.EncryptionKeySHA256 = &val
+	}
+	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+		result.EncryptionScope = &val
+	}
 	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
 		isServerEncrypted, err := strconv.ParseBool(val)
 		if err != nil {
@@ -638,11 +649,11 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl
 		}
 		result.IsServerEncrypted = &isServerEncrypted
 	}
-	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
-		result.EncryptionKeySHA256 = &val
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
 	}
 	return result, nil
 }
@@ -665,18 +676,21 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl
 //   - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL
 //     method.
 func (client *BlockBlobClient) StageBlockFromURL(ctx context.Context, blockID string, contentLength int64, sourceURL string, options *BlockBlobClientStageBlockFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (BlockBlobClientStageBlockFromURLResponse, error) {
+	var err error
 	req, err := client.stageBlockFromURLCreateRequest(ctx, blockID, contentLength, sourceURL, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sourceModifiedAccessConditions)
 	if err != nil {
 		return BlockBlobClientStageBlockFromURLResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlockBlobClientStageBlockFromURLResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return BlockBlobClientStageBlockFromURLResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
+		err = runtime.NewResponseError(httpResp)
+		return BlockBlobClientStageBlockFromURLResponse{}, err
 	}
-	return client.stageBlockFromURLHandleResponse(resp)
+	resp, err := client.stageBlockFromURLHandleResponse(httpResp)
+	return resp, err
 }
 
 // stageBlockFromURLCreateRequest creates the StageBlockFromURL request.
@@ -744,12 +758,8 @@ func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex
 // stageBlockFromURLHandleResponse handles the StageBlockFromURL response.
 func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Response) (BlockBlobClientStageBlockFromURLResponse, error) {
 	result := BlockBlobClientStageBlockFromURLResponse{}
-	if val := resp.Header.Get("Content-MD5"); val != "" {
-		contentMD5, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return BlockBlobClientStageBlockFromURLResponse{}, err
-		}
-		result.ContentMD5 = contentMD5
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
 	}
 	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
 		contentCRC64, err := base64.StdEncoding.DecodeString(val)
@@ -758,14 +768,12 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon
 		}
 		result.ContentCRC64 = contentCRC64
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
+	if val := resp.Header.Get("Content-MD5"); val != "" {
+		contentMD5, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return BlockBlobClientStageBlockFromURLResponse{}, err
+		}
+		result.ContentMD5 = contentMD5
 	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
@@ -774,6 +782,12 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+		result.EncryptionKeySHA256 = &val
+	}
+	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+		result.EncryptionScope = &val
+	}
 	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
 		isServerEncrypted, err := strconv.ParseBool(val)
 		if err != nil {
@@ -781,11 +795,11 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon
 		}
 		result.IsServerEncrypted = &isServerEncrypted
 	}
-	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
-		result.EncryptionKeySHA256 = &val
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
 	}
 	return result, nil
 }
@@ -806,18 +820,21 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon
 //   - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *BlockBlobClient) Upload(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *BlockBlobClientUploadOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (BlockBlobClientUploadResponse, error) {
+	var err error
 	req, err := client.uploadCreateRequest(ctx, contentLength, body, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
 	if err != nil {
 		return BlockBlobClientUploadResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return BlockBlobClientUploadResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return BlockBlobClientUploadResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
+		err = runtime.NewResponseError(httpResp)
+		return BlockBlobClientUploadResponse{}, err
 	}
-	return client.uploadHandleResponse(resp)
+	resp, err := client.uploadHandleResponse(httpResp)
+	return resp, err
 }
 
 // uploadCreateRequest creates the Upload request.
@@ -923,15 +940,8 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL
 // uploadHandleResponse handles the Upload response.
 func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockBlobClientUploadResponse, error) {
 	result := BlockBlobClientUploadResponse{}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
-	}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlockBlobClientUploadResponse{}, err
-		}
-		result.LastModified = &lastModified
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
 	}
 	if val := resp.Header.Get("Content-MD5"); val != "" {
 		contentMD5, err := base64.StdEncoding.DecodeString(val)
@@ -940,8 +950,35 @@ func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockB
 		}
 		result.ContentMD5 = contentMD5
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlockBlobClientUploadResponse{}, err
+		}
+		result.Date = &date
+	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+		result.EncryptionKeySHA256 = &val
+	}
+	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+		result.EncryptionScope = &val
+	}
+	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
+		isServerEncrypted, err := strconv.ParseBool(val)
+		if err != nil {
+			return BlockBlobClientUploadResponse{}, err
+		}
+		result.IsServerEncrypted = &isServerEncrypted
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return BlockBlobClientUploadResponse{}, err
+		}
+		result.LastModified = &lastModified
 	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
@@ -952,25 +989,5 @@ func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockB
 	if val := resp.Header.Get("x-ms-version-id"); val != "" {
 		result.VersionID = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return BlockBlobClientUploadResponse{}, err
-		}
-		result.Date = &date
-	}
-	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
-		isServerEncrypted, err := strconv.ParseBool(val)
-		if err != nil {
-			return BlockBlobClientUploadResponse{}, err
-		}
-		result.IsServerEncrypted = &isServerEncrypted
-	}
-	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
-		result.EncryptionKeySHA256 = &val
-	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
-	}
 	return result, nil
 }
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go
index b9d306cacf..95af9e1544 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go
@@ -3,9 +3,8 @@
 
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License. See License.txt in the project root for license information.
-// Code generated by Microsoft (R) AutoRest Code Generator.
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
 // Changes may cause incorrect behavior and will be lost if the code is regenerated.
-// DO NOT EDIT.
 
 package generated
 
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go
index 8d325a3a58..ce1ff6fdd7 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go
@@ -3,9 +3,8 @@
 
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License. See License.txt in the project root for license information.
-// Code generated by Microsoft (R) AutoRest Code Generator.
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
 // Changes may cause incorrect behavior and will be lost if the code is regenerated.
-// DO NOT EDIT.
 
 package generated
 
@@ -42,18 +41,21 @@ type ContainerClient struct {
 //   - options - ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *ContainerClient) AcquireLease(ctx context.Context, duration int32, options *ContainerClientAcquireLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientAcquireLeaseResponse, error) {
+	var err error
 	req, err := client.acquireLeaseCreateRequest(ctx, duration, options, modifiedAccessConditions)
 	if err != nil {
 		return ContainerClientAcquireLeaseResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ContainerClientAcquireLeaseResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return ContainerClientAcquireLeaseResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
+		err = runtime.NewResponseError(httpResp)
+		return ContainerClientAcquireLeaseResponse{}, err
 	}
-	return client.acquireLeaseHandleResponse(resp)
+	resp, err := client.acquireLeaseHandleResponse(httpResp)
+	return resp, err
 }
 
 // acquireLeaseCreateRequest creates the AcquireLease request.
@@ -91,6 +93,16 @@ func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, du
 // acquireLeaseHandleResponse handles the AcquireLease response.
 func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) (ContainerClientAcquireLeaseResponse, error) {
 	result := ContainerClientAcquireLeaseResponse{}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ContainerClientAcquireLeaseResponse{}, err
+		}
+		result.Date = &date
+	}
 	if val := resp.Header.Get("ETag"); val != "" {
 		result.ETag = (*azcore.ETag)(&val)
 	}
@@ -104,22 +116,12 @@ func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) (
 	if val := resp.Header.Get("x-ms-lease-id"); val != "" {
 		result.LeaseID = &val
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
 	}
 	if val := resp.Header.Get("x-ms-version"); val != "" {
 		result.Version = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return ContainerClientAcquireLeaseResponse{}, err
-		}
-		result.Date = &date
-	}
 	return result, nil
 }
 
@@ -131,18 +133,21 @@ func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) (
 //   - options - ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *ContainerClient) BreakLease(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientBreakLeaseResponse, error) {
+	var err error
 	req, err := client.breakLeaseCreateRequest(ctx, options, modifiedAccessConditions)
 	if err != nil {
 		return ContainerClientBreakLeaseResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ContainerClientBreakLeaseResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusAccepted) {
-		return ContainerClientBreakLeaseResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusAccepted) {
+		err = runtime.NewResponseError(httpResp)
+		return ContainerClientBreakLeaseResponse{}, err
 	}
-	return client.breakLeaseHandleResponse(resp)
+	resp, err := client.breakLeaseHandleResponse(httpResp)
+	return resp, err
 }
 
 // breakLeaseCreateRequest creates the BreakLease request.
@@ -179,6 +184,16 @@ func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, opti
 // breakLeaseHandleResponse handles the BreakLease response.
 func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (ContainerClientBreakLeaseResponse, error) {
 	result := ContainerClientBreakLeaseResponse{}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ContainerClientBreakLeaseResponse{}, err
+		}
+		result.Date = &date
+	}
 	if val := resp.Header.Get("ETag"); val != "" {
 		result.ETag = (*azcore.ETag)(&val)
 	}
@@ -197,22 +212,12 @@ func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (Co
 		}
 		result.LeaseTime = &leaseTime
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
 	}
 	if val := resp.Header.Get("x-ms-version"); val != "" {
 		result.Version = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return ContainerClientBreakLeaseResponse{}, err
-		}
-		result.Date = &date
-	}
 	return result, nil
 }
 
@@ -228,18 +233,21 @@ func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (Co
 //   - options - ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *ContainerClient) ChangeLease(ctx context.Context, leaseID string, proposedLeaseID string, options *ContainerClientChangeLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientChangeLeaseResponse, error) {
+	var err error
 	req, err := client.changeLeaseCreateRequest(ctx, leaseID, proposedLeaseID, options, modifiedAccessConditions)
 	if err != nil {
 		return ContainerClientChangeLeaseResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ContainerClientChangeLeaseResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return ContainerClientChangeLeaseResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return ContainerClientChangeLeaseResponse{}, err
 	}
-	return client.changeLeaseHandleResponse(resp)
+	resp, err := client.changeLeaseHandleResponse(httpResp)
+	return resp, err
 }
 
 // changeLeaseCreateRequest creates the ChangeLease request.
@@ -275,6 +283,16 @@ func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, lea
 // changeLeaseHandleResponse handles the ChangeLease response.
 func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (ContainerClientChangeLeaseResponse, error) {
 	result := ContainerClientChangeLeaseResponse{}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ContainerClientChangeLeaseResponse{}, err
+		}
+		result.Date = &date
+	}
 	if val := resp.Header.Get("ETag"); val != "" {
 		result.ETag = (*azcore.ETag)(&val)
 	}
@@ -288,22 +306,12 @@ func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (C
 	if val := resp.Header.Get("x-ms-lease-id"); val != "" {
 		result.LeaseID = &val
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
 	}
 	if val := resp.Header.Get("x-ms-version"); val != "" {
 		result.Version = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return ContainerClientChangeLeaseResponse{}, err
-		}
-		result.Date = &date
-	}
 	return result, nil
 }
 
@@ -315,18 +323,21 @@ func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (C
 //   - options - ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method.
 //   - ContainerCPKScopeInfo - ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method.
 func (client *ContainerClient) Create(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (ContainerClientCreateResponse, error) {
+	var err error
 	req, err := client.createCreateRequest(ctx, options, containerCPKScopeInfo)
 	if err != nil {
 		return ContainerClientCreateResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ContainerClientCreateResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return ContainerClientCreateResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
+		err = runtime.NewResponseError(httpResp)
+		return ContainerClientCreateResponse{}, err
 	}
-	return client.createHandleResponse(resp)
+	resp, err := client.createHandleResponse(httpResp)
+	return resp, err
 }
 
 // createCreateRequest creates the Create request.
@@ -368,6 +379,16 @@ func (client *ContainerClient) createCreateRequest(ctx context.Context, options
 // createHandleResponse handles the Create response.
 func (client *ContainerClient) createHandleResponse(resp *http.Response) (ContainerClientCreateResponse, error) {
 	result := ContainerClientCreateResponse{}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ContainerClientCreateResponse{}, err
+		}
+		result.Date = &date
+	}
 	if val := resp.Header.Get("ETag"); val != "" {
 		result.ETag = (*azcore.ETag)(&val)
 	}
@@ -378,22 +399,12 @@ func (client *ContainerClient) createHandleResponse(resp *http.Response) (Contai
 		}
 		result.LastModified = &lastModified
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
 	}
 	if val := resp.Header.Get("x-ms-version"); val != "" {
 		result.Version = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return ContainerClientCreateResponse{}, err
-		}
-		result.Date = &date
-	}
 	return result, nil
 }
 
@@ -406,18 +417,21 @@ func (client *ContainerClient) createHandleResponse(resp *http.Response) (Contai
 //   - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *ContainerClient) Delete(ctx context.Context, options *ContainerClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientDeleteResponse, error) {
+	var err error
 	req, err := client.deleteCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions)
 	if err != nil {
 		return ContainerClientDeleteResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ContainerClientDeleteResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusAccepted) {
-		return ContainerClientDeleteResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusAccepted) {
+		err = runtime.NewResponseError(httpResp)
+		return ContainerClientDeleteResponse{}, err
 	}
-	return client.deleteHandleResponse(resp)
+	resp, err := client.deleteHandleResponse(httpResp)
+	return resp, err
 }
 
 // deleteCreateRequest creates the Delete request.
@@ -455,12 +469,6 @@ func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (Contai
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -468,6 +476,12 @@ func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (Contai
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	return result, nil
 }
 
@@ -479,18 +493,21 @@ func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (Contai
 //   - where - Filters the results to return only to return only blobs whose tags match the specified expression.
 //   - options - ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method.
 func (client *ContainerClient) FilterBlobs(ctx context.Context, where string, options *ContainerClientFilterBlobsOptions) (ContainerClientFilterBlobsResponse, error) {
+	var err error
 	req, err := client.filterBlobsCreateRequest(ctx, where, options)
 	if err != nil {
 		return ContainerClientFilterBlobsResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ContainerClientFilterBlobsResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return ContainerClientFilterBlobsResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return ContainerClientFilterBlobsResponse{}, err
 	}
-	return client.filterBlobsHandleResponse(resp)
+	resp, err := client.filterBlobsHandleResponse(httpResp)
+	return resp, err
 }
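
The where expression passed to FilterBlobs uses the blob tags query syntax (double-quoted tag names, single-quoted values, combined with AND); a short sketch follows, with made-up tag names and an assumed client and ctx in scope:

	where := `"env" = 'prod' AND "team" = 'storage'`
	resp, err := client.FilterBlobs(ctx, where, nil)
	if err != nil {
		return err
	}
	// Matching blobs arrive in the XML-decoded FilterBlobSegment on the response.
	_ = resp.FilterBlobSegment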
 
 // filterBlobsCreateRequest creates the FilterBlobs request.
@@ -530,12 +547,6 @@ func (client *ContainerClient) filterBlobsHandleResponse(resp *http.Response) (C
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -543,6 +554,12 @@ func (client *ContainerClient) filterBlobsHandleResponse(resp *http.Response) (C
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil {
 		return ContainerClientFilterBlobsResponse{}, err
 	}
@@ -558,18 +575,21 @@ func (client *ContainerClient) filterBlobsHandleResponse(resp *http.Response) (C
 //     method.
 //   - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
 func (client *ContainerClient) GetAccessPolicy(ctx context.Context, options *ContainerClientGetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetAccessPolicyResponse, error) {
+	var err error
 	req, err := client.getAccessPolicyCreateRequest(ctx, options, leaseAccessConditions)
 	if err != nil {
 		return ContainerClientGetAccessPolicyResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ContainerClientGetAccessPolicyResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return ContainerClientGetAccessPolicyResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return ContainerClientGetAccessPolicyResponse{}, err
 	}
-	return client.getAccessPolicyHandleResponse(resp)
+	resp, err := client.getAccessPolicyHandleResponse(httpResp)
+	return resp, err
 }
 
 // getAccessPolicyCreateRequest creates the GetAccessPolicy request.
@@ -602,6 +622,16 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response
 	if val := resp.Header.Get("x-ms-blob-public-access"); val != "" {
 		result.BlobPublicAccess = (*PublicAccessType)(&val)
 	}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ContainerClientGetAccessPolicyResponse{}, err
+		}
+		result.Date = &date
+	}
 	if val := resp.Header.Get("ETag"); val != "" {
 		result.ETag = (*azcore.ETag)(&val)
 	}
@@ -612,22 +642,12 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response
 		}
 		result.LastModified = &lastModified
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
 	}
 	if val := resp.Header.Get("x-ms-version"); val != "" {
 		result.Version = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return ContainerClientGetAccessPolicyResponse{}, err
-		}
-		result.Date = &date
-	}
 	if err := runtime.UnmarshalAsXML(resp, &result); err != nil {
 		return ContainerClientGetAccessPolicyResponse{}, err
 	}
@@ -641,18 +661,21 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response
 //   - options - ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo
 //     method.
 func (client *ContainerClient) GetAccountInfo(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (ContainerClientGetAccountInfoResponse, error) {
+	var err error
 	req, err := client.getAccountInfoCreateRequest(ctx, options)
 	if err != nil {
 		return ContainerClientGetAccountInfoResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ContainerClientGetAccountInfoResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return ContainerClientGetAccountInfoResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return ContainerClientGetAccountInfoResponse{}, err
 	}
-	return client.getAccountInfoHandleResponse(resp)
+	resp, err := client.getAccountInfoHandleResponse(httpResp)
+	return resp, err
 }
 
 // getAccountInfoCreateRequest creates the GetAccountInfo request.
@@ -673,15 +696,12 @@ func (client *ContainerClient) getAccountInfoCreateRequest(ctx context.Context,
 // getAccountInfoHandleResponse handles the GetAccountInfo response.
 func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) (ContainerClientGetAccountInfoResponse, error) {
 	result := ContainerClientGetAccountInfoResponse{}
+	if val := resp.Header.Get("x-ms-account-kind"); val != "" {
+		result.AccountKind = (*AccountKind)(&val)
+	}
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -689,11 +709,14 @@ func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response)
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
 	if val := resp.Header.Get("x-ms-sku-name"); val != "" {
 		result.SKUName = (*SKUName)(&val)
 	}
-	if val := resp.Header.Get("x-ms-account-kind"); val != "" {
-		result.AccountKind = (*AccountKind)(&val)
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
 	}
 	return result, nil
 }
@@ -706,18 +729,21 @@ func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response)
 //   - options - ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method.
 //   - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
 func (client *ContainerClient) GetProperties(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetPropertiesResponse, error) {
+	var err error
 	req, err := client.getPropertiesCreateRequest(ctx, options, leaseAccessConditions)
 	if err != nil {
 		return ContainerClientGetPropertiesResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ContainerClientGetPropertiesResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return ContainerClientGetPropertiesResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return ContainerClientGetPropertiesResponse{}, err
 	}
-	return client.getPropertiesHandleResponse(resp)
+	resp, err := client.getPropertiesHandleResponse(httpResp)
+	return resp, err
 }
 
 // getPropertiesCreateRequest creates the GetProperties request.
@@ -746,17 +772,53 @@ func (client *ContainerClient) getPropertiesCreateRequest(ctx context.Context, o
 // getPropertiesHandleResponse handles the GetProperties response.
 func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) (ContainerClientGetPropertiesResponse, error) {
 	result := ContainerClientGetPropertiesResponse{}
-	for hh := range resp.Header {
-		if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") {
-			if result.Metadata == nil {
-				result.Metadata = map[string]*string{}
-			}
-			result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh))
+	if val := resp.Header.Get("x-ms-blob-public-access"); val != "" {
+		result.BlobPublicAccess = (*PublicAccessType)(&val)
+	}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ContainerClientGetPropertiesResponse{}, err
 		}
+		result.Date = &date
+	}
+	if val := resp.Header.Get("x-ms-default-encryption-scope"); val != "" {
+		result.DefaultEncryptionScope = &val
+	}
+	if val := resp.Header.Get("x-ms-deny-encryption-scope-override"); val != "" {
+		denyEncryptionScopeOverride, err := strconv.ParseBool(val)
+		if err != nil {
+			return ContainerClientGetPropertiesResponse{}, err
+		}
+		result.DenyEncryptionScopeOverride = &denyEncryptionScopeOverride
 	}
 	if val := resp.Header.Get("ETag"); val != "" {
 		result.ETag = (*azcore.ETag)(&val)
 	}
+	if val := resp.Header.Get("x-ms-has-immutability-policy"); val != "" {
+		hasImmutabilityPolicy, err := strconv.ParseBool(val)
+		if err != nil {
+			return ContainerClientGetPropertiesResponse{}, err
+		}
+		result.HasImmutabilityPolicy = &hasImmutabilityPolicy
+	}
+	if val := resp.Header.Get("x-ms-has-legal-hold"); val != "" {
+		hasLegalHold, err := strconv.ParseBool(val)
+		if err != nil {
+			return ContainerClientGetPropertiesResponse{}, err
+		}
+		result.HasLegalHold = &hasLegalHold
+	}
+	if val := resp.Header.Get("x-ms-immutable-storage-with-versioning-enabled"); val != "" {
+		isImmutableStorageWithVersioningEnabled, err := strconv.ParseBool(val)
+		if err != nil {
+			return ContainerClientGetPropertiesResponse{}, err
+		}
+		result.IsImmutableStorageWithVersioningEnabled = &isImmutableStorageWithVersioningEnabled
+	}
 	if val := resp.Header.Get("Last-Modified"); val != "" {
 		lastModified, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -773,8 +835,13 @@ func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response)
 	if val := resp.Header.Get("x-ms-lease-status"); val != "" {
 		result.LeaseStatus = (*LeaseStatusType)(&val)
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
+	for hh := range resp.Header {
+		if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") {
+			if result.Metadata == nil {
+				result.Metadata = map[string]*string{}
+			}
+			result.Metadata[hh[len("x-ms-meta-"):]] = to.Ptr(resp.Header.Get(hh))
+		}
 	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
@@ -782,47 +849,6 @@ func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response)
 	if val := resp.Header.Get("x-ms-version"); val != "" {
 		result.Version = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return ContainerClientGetPropertiesResponse{}, err
-		}
-		result.Date = &date
-	}
-	if val := resp.Header.Get("x-ms-blob-public-access"); val != "" {
-		result.BlobPublicAccess = (*PublicAccessType)(&val)
-	}
-	if val := resp.Header.Get("x-ms-has-immutability-policy"); val != "" {
-		hasImmutabilityPolicy, err := strconv.ParseBool(val)
-		if err != nil {
-			return ContainerClientGetPropertiesResponse{}, err
-		}
-		result.HasImmutabilityPolicy = &hasImmutabilityPolicy
-	}
-	if val := resp.Header.Get("x-ms-has-legal-hold"); val != "" {
-		hasLegalHold, err := strconv.ParseBool(val)
-		if err != nil {
-			return ContainerClientGetPropertiesResponse{}, err
-		}
-		result.HasLegalHold = &hasLegalHold
-	}
-	if val := resp.Header.Get("x-ms-default-encryption-scope"); val != "" {
-		result.DefaultEncryptionScope = &val
-	}
-	if val := resp.Header.Get("x-ms-deny-encryption-scope-override"); val != "" {
-		denyEncryptionScopeOverride, err := strconv.ParseBool(val)
-		if err != nil {
-			return ContainerClientGetPropertiesResponse{}, err
-		}
-		result.DenyEncryptionScopeOverride = &denyEncryptionScopeOverride
-	}
-	if val := resp.Header.Get("x-ms-immutable-storage-with-versioning-enabled"); val != "" {
-		isImmutableStorageWithVersioningEnabled, err := strconv.ParseBool(val)
-		if err != nil {
-			return ContainerClientGetPropertiesResponse{}, err
-		}
-		result.IsImmutableStorageWithVersioningEnabled = &isImmutableStorageWithVersioningEnabled
-	}
 	return result, nil
 }
 
@@ -868,17 +894,11 @@ func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Cont
 // listBlobFlatSegmentHandleResponse handles the ListBlobFlatSegment response.
 func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Response) (ContainerClientListBlobFlatSegmentResponse, error) {
 	result := ContainerClientListBlobFlatSegmentResponse{}
-	if val := resp.Header.Get("Content-Type"); val != "" {
-		result.ContentType = &val
-	}
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
+	if val := resp.Header.Get("Content-Type"); val != "" {
+		result.ContentType = &val
 	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
@@ -887,6 +907,12 @@ func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Resp
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsFlatSegmentResponse); err != nil {
 		return ContainerClientListBlobFlatSegmentResponse{}, err
 	}
@@ -907,23 +933,16 @@ func (client *ContainerClient) NewListBlobHierarchySegmentPager(delimiter string
 			return page.NextMarker != nil && len(*page.NextMarker) > 0
 		},
 		Fetcher: func(ctx context.Context, page *ContainerClientListBlobHierarchySegmentResponse) (ContainerClientListBlobHierarchySegmentResponse, error) {
-			var req *policy.Request
-			var err error
-			if page == nil {
-				req, err = client.ListBlobHierarchySegmentCreateRequest(ctx, delimiter, options)
-			} else {
-				req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker)
+			nextLink := ""
+			if page != nil {
+				nextLink = *page.NextMarker
 			}
+			resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) {
+				return client.ListBlobHierarchySegmentCreateRequest(ctx, delimiter, options)
+			}, nil)
 			if err != nil {
 				return ContainerClientListBlobHierarchySegmentResponse{}, err
 			}
-			resp, err := client.internal.Pipeline().Do(req)
-			if err != nil {
-				return ContainerClientListBlobHierarchySegmentResponse{}, err
-			}
-			if !runtime.HasStatusCode(resp, http.StatusOK) {
-				return ContainerClientListBlobHierarchySegmentResponse{}, runtime.NewResponseError(resp)
-			}
 			return client.ListBlobHierarchySegmentHandleResponse(resp)
 		},
 	})
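
With the Fetcher rewritten around runtime.FetcherForNextLink, request construction, the pipeline call, and the HTTP 200 check all happen inside azcore; a minimal consumption sketch (client and ctx are assumed to exist, and "/" is an arbitrary delimiter):

	pager := client.NewListBlobHierarchySegmentPager("/", nil)
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return err // non-200 pages surface here as *azcore.ResponseError
		}
		// page embeds ListBlobsHierarchySegmentResponse; walk page.Segment as needed.
		_ = page
	}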
@@ -966,17 +985,11 @@ func (client *ContainerClient) ListBlobHierarchySegmentCreateRequest(ctx context
 // ListBlobHierarchySegmentHandleResponse handles the ListBlobHierarchySegment response.
 func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http.Response) (ContainerClientListBlobHierarchySegmentResponse, error) {
 	result := ContainerClientListBlobHierarchySegmentResponse{}
-	if val := resp.Header.Get("Content-Type"); val != "" {
-		result.ContentType = &val
-	}
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
+	if val := resp.Header.Get("Content-Type"); val != "" {
+		result.ContentType = &val
 	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
@@ -985,6 +998,12 @@ func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	if err := runtime.UnmarshalAsXML(resp, &result.ListBlobsHierarchySegmentResponse); err != nil {
 		return ContainerClientListBlobHierarchySegmentResponse{}, err
 	}
@@ -1000,18 +1019,21 @@ func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http
 //   - options - ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *ContainerClient) ReleaseLease(ctx context.Context, leaseID string, options *ContainerClientReleaseLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientReleaseLeaseResponse, error) {
+	var err error
 	req, err := client.releaseLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions)
 	if err != nil {
 		return ContainerClientReleaseLeaseResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ContainerClientReleaseLeaseResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return ContainerClientReleaseLeaseResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return ContainerClientReleaseLeaseResponse{}, err
 	}
-	return client.releaseLeaseHandleResponse(resp)
+	resp, err := client.releaseLeaseHandleResponse(httpResp)
+	return resp, err
 }
 
 // releaseLeaseCreateRequest creates the ReleaseLease request.
@@ -1046,6 +1068,16 @@ func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, le
 // releaseLeaseHandleResponse handles the ReleaseLease response.
 func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) (ContainerClientReleaseLeaseResponse, error) {
 	result := ContainerClientReleaseLeaseResponse{}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ContainerClientReleaseLeaseResponse{}, err
+		}
+		result.Date = &date
+	}
 	if val := resp.Header.Get("ETag"); val != "" {
 		result.ETag = (*azcore.ETag)(&val)
 	}
@@ -1056,22 +1088,12 @@ func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) (
 		}
 		result.LastModified = &lastModified
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
 	}
 	if val := resp.Header.Get("x-ms-version"); val != "" {
 		result.Version = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return ContainerClientReleaseLeaseResponse{}, err
-		}
-		result.Date = &date
-	}
 	return result, nil
 }
 
@@ -1082,18 +1104,21 @@ func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) (
 //   - sourceContainerName - Required. Specifies the name of the container to rename.
 //   - options - ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method.
 func (client *ContainerClient) Rename(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (ContainerClientRenameResponse, error) {
+	var err error
 	req, err := client.renameCreateRequest(ctx, sourceContainerName, options)
 	if err != nil {
 		return ContainerClientRenameResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ContainerClientRenameResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return ContainerClientRenameResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return ContainerClientRenameResponse{}, err
 	}
-	return client.renameHandleResponse(resp)
+	resp, err := client.renameHandleResponse(httpResp)
+	return resp, err
 }
 
 // renameCreateRequest creates the Rename request.
@@ -1127,12 +1152,6 @@ func (client *ContainerClient) renameHandleResponse(resp *http.Response) (Contai
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -1140,6 +1159,12 @@ func (client *ContainerClient) renameHandleResponse(resp *http.Response) (Contai
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	return result, nil
 }
 
@@ -1152,18 +1177,21 @@ func (client *ContainerClient) renameHandleResponse(resp *http.Response) (Contai
 //   - options - ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *ContainerClient) RenewLease(ctx context.Context, leaseID string, options *ContainerClientRenewLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientRenewLeaseResponse, error) {
+	var err error
 	req, err := client.renewLeaseCreateRequest(ctx, leaseID, options, modifiedAccessConditions)
 	if err != nil {
 		return ContainerClientRenewLeaseResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ContainerClientRenewLeaseResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return ContainerClientRenewLeaseResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return ContainerClientRenewLeaseResponse{}, err
 	}
-	return client.renewLeaseHandleResponse(resp)
+	resp, err := client.renewLeaseHandleResponse(httpResp)
+	return resp, err
 }
 
 // renewLeaseCreateRequest creates the RenewLease request.
@@ -1198,6 +1226,16 @@ func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leas
 // renewLeaseHandleResponse handles the RenewLease response.
 func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (ContainerClientRenewLeaseResponse, error) {
 	result := ContainerClientRenewLeaseResponse{}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ContainerClientRenewLeaseResponse{}, err
+		}
+		result.Date = &date
+	}
 	if val := resp.Header.Get("ETag"); val != "" {
 		result.ETag = (*azcore.ETag)(&val)
 	}
@@ -1211,22 +1249,12 @@ func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (Co
 	if val := resp.Header.Get("x-ms-lease-id"); val != "" {
 		result.LeaseID = &val
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
 	}
 	if val := resp.Header.Get("x-ms-version"); val != "" {
 		result.Version = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return ContainerClientRenewLeaseResponse{}, err
-		}
-		result.Date = &date
-	}
 	return result, nil
 }
 
@@ -1236,18 +1264,21 @@ func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (Co
 // Generated from API version 2023-08-03
 //   - options - ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method.
 func (client *ContainerClient) Restore(ctx context.Context, options *ContainerClientRestoreOptions) (ContainerClientRestoreResponse, error) {
+	var err error
 	req, err := client.restoreCreateRequest(ctx, options)
 	if err != nil {
 		return ContainerClientRestoreResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ContainerClientRestoreResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return ContainerClientRestoreResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
+		err = runtime.NewResponseError(httpResp)
+		return ContainerClientRestoreResponse{}, err
 	}
-	return client.restoreHandleResponse(resp)
+	resp, err := client.restoreHandleResponse(httpResp)
+	return resp, err
 }
 
 // restoreCreateRequest creates the Restore request.
@@ -1283,12 +1314,6 @@ func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (Conta
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -1296,6 +1321,12 @@ func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (Conta
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	return result, nil
 }
 
@@ -1310,18 +1341,21 @@ func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (Conta
 //   - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *ContainerClient) SetAccessPolicy(ctx context.Context, containerACL []*SignedIdentifier, options *ContainerClientSetAccessPolicyOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetAccessPolicyResponse, error) {
+	var err error
 	req, err := client.setAccessPolicyCreateRequest(ctx, containerACL, options, leaseAccessConditions, modifiedAccessConditions)
 	if err != nil {
 		return ContainerClientSetAccessPolicyResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ContainerClientSetAccessPolicyResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return ContainerClientSetAccessPolicyResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return ContainerClientSetAccessPolicyResponse{}, err
 	}
-	return client.setAccessPolicyHandleResponse(resp)
+	resp, err := client.setAccessPolicyHandleResponse(httpResp)
+	return resp, err
 }
 
 // setAccessPolicyCreateRequest creates the SetAccessPolicy request.
@@ -1367,6 +1401,16 @@ func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context,
 // setAccessPolicyHandleResponse handles the SetAccessPolicy response.
 func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response) (ContainerClientSetAccessPolicyResponse, error) {
 	result := ContainerClientSetAccessPolicyResponse{}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ContainerClientSetAccessPolicyResponse{}, err
+		}
+		result.Date = &date
+	}
 	if val := resp.Header.Get("ETag"); val != "" {
 		result.ETag = (*azcore.ETag)(&val)
 	}
@@ -1377,22 +1421,12 @@ func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response
 		}
 		result.LastModified = &lastModified
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
 	}
 	if val := resp.Header.Get("x-ms-version"); val != "" {
 		result.Version = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return ContainerClientSetAccessPolicyResponse{}, err
-		}
-		result.Date = &date
-	}
 	return result, nil
 }
 
@@ -1404,18 +1438,21 @@ func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response
 //   - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *ContainerClient) SetMetadata(ctx context.Context, options *ContainerClientSetMetadataOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientSetMetadataResponse, error) {
+	var err error
 	req, err := client.setMetadataCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions)
 	if err != nil {
 		return ContainerClientSetMetadataResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ContainerClientSetMetadataResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return ContainerClientSetMetadataResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return ContainerClientSetMetadataResponse{}, err
 	}
-	return client.setMetadataHandleResponse(resp)
+	resp, err := client.setMetadataHandleResponse(httpResp)
+	return resp, err
 }
 
 // setMetadataCreateRequest creates the SetMetadata request.
@@ -1455,6 +1492,16 @@ func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, opt
 // setMetadataHandleResponse handles the SetMetadata response.
 func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (ContainerClientSetMetadataResponse, error) {
 	result := ContainerClientSetMetadataResponse{}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return ContainerClientSetMetadataResponse{}, err
+		}
+		result.Date = &date
+	}
 	if val := resp.Header.Get("ETag"); val != "" {
 		result.ETag = (*azcore.ETag)(&val)
 	}
@@ -1465,22 +1512,12 @@ func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (C
 		}
 		result.LastModified = &lastModified
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
 	}
 	if val := resp.Header.Get("x-ms-version"); val != "" {
 		result.Version = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return ContainerClientSetMetadataResponse{}, err
-		}
-		result.Date = &date
-	}
 	return result, nil
 }
 
@@ -1494,18 +1531,21 @@ func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (C
 //   - body - Initial data
 //   - options - ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method.
 func (client *ContainerClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ContainerClientSubmitBatchOptions) (ContainerClientSubmitBatchResponse, error) {
+	var err error
 	req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options)
 	if err != nil {
 		return ContainerClientSubmitBatchResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ContainerClientSubmitBatchResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusAccepted) {
-		return ContainerClientSubmitBatchResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusAccepted) {
+		err = runtime.NewResponseError(httpResp)
+		return ContainerClientSubmitBatchResponse{}, err
 	}
-	return client.submitBatchHandleResponse(resp)
+	resp, err := client.submitBatchHandleResponse(httpResp)
+	return resp, err
 }
 
 // submitBatchCreateRequest creates the SubmitBatch request.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go
index 1fed5f630b..7251de8395 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go
@@ -3,9 +3,8 @@
 
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License. See License.txt in the project root for license information.
-// Code generated by Microsoft (R) AutoRest Code Generator.
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
 // Changes may cause incorrect behavior and will be lost if the code is regenerated.
-// DO NOT EDIT.
 
 package generated
 
@@ -26,89 +25,6 @@ type AccessPolicy struct {
 	Start *time.Time `xml:"Start"`
 }
 
-// AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL
-// method.
-type AppendBlobClientAppendBlockFromURLOptions struct {
-	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
-	CopySourceAuthorization *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
-	SourceContentMD5 []byte
-	// Specify the crc64 calculated for the range of bytes that must be read from the copy source.
-	SourceContentcrc64 []byte
-	// Bytes of source data in the specified range.
-	SourceRange *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// Specify the transactional md5 for the body, to be validated by the service.
-	TransactionalContentMD5 []byte
-}
-
-// AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method.
-type AppendBlobClientAppendBlockOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// Specify the transactional crc64 for the body, to be validated by the service.
-	TransactionalContentCRC64 []byte
-	// Specify the transactional md5 for the body, to be validated by the service.
-	TransactionalContentMD5 []byte
-}
-
-// AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method.
-type AppendBlobClientCreateOptions struct {
-	// Optional. Used to set blob tags in various blob operations.
-	BlobTagsString *string
-	// Specifies the date time when the blobs immutability policy is set to expire.
-	ImmutabilityPolicyExpiry *time.Time
-	// Specifies the immutability policy mode to set on the blob.
-	ImmutabilityPolicyMode *ImmutabilityPolicySetting
-	// Specified if a legal hold should be set on the blob.
-	LegalHold *bool
-	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
-	// operation will copy the metadata from the source blob or file to the destination
-	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
-	// is not copied from the source blob or file. Note that beginning with
-	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
-	// Blobs, and Metadata for more information.
-	Metadata map[string]*string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method.
-type AppendBlobClientSealOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock method.
-type AppendPositionAccessConditions struct {
-	// Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare.
-	// Append Block will succeed only if the append position is equal to this number. If
-	// it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed).
-	AppendPosition *int64
-	// Optional conditional header. The max length in bytes permitted for the append blob. If the Append Block operation would
-	// cause the blob to exceed that limit or if the blob size is already greater than
-	// the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 -
-	// Precondition Failed).
-	MaxSize *int64
-}
-
 // ArrowConfiguration - Groups the settings used for formatting the response if the response should be Arrow formatted.
 type ArrowConfiguration struct {
 	// REQUIRED
@@ -124,405 +40,11 @@ type ArrowField struct {
 	Scale     *int32  `xml:"Scale"`
 }
 
-// BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method.
-type BlobClientAbortCopyFromURLOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method.
-type BlobClientAcquireLeaseOptions struct {
-	// Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is
-	// not in the correct format. See Guid Constructor (String) for a list of valid GUID
-	// string formats.
-	ProposedLeaseID *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method.
-type BlobClientBreakLeaseOptions struct {
-	// For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This
-	// break period is only used if it is shorter than the time remaining on the
-	// lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has
-	// expired, but the lease may be held for longer than the break period. If this
-	// header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses,
-	// and an infinite lease breaks immediately.
-	BreakPeriod *int32
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method.
-type BlobClientChangeLeaseOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method.
-type BlobClientCopyFromURLOptions struct {
-	// Optional. Used to set blob tags in various blob operations.
-	BlobTagsString *string
-	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
-	CopySourceAuthorization *string
-	// Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags.
-	CopySourceTags *BlobCopySourceTags
-	// Specifies the date time when the blobs immutability policy is set to expire.
-	ImmutabilityPolicyExpiry *time.Time
-	// Specifies the immutability policy mode to set on the blob.
-	ImmutabilityPolicyMode *ImmutabilityPolicySetting
-	// Specified if a legal hold should be set on the blob.
-	LegalHold *bool
-	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
-	// operation will copy the metadata from the source blob or file to the destination
-	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
-	// is not copied from the source blob or file. Note that beginning with
-	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
-	// Blobs, and Metadata for more information.
-	Metadata map[string]*string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
-	SourceContentMD5 []byte
-	// Optional. Indicates the tier to be set on the blob.
-	Tier *AccessTier
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method.
-type BlobClientCreateSnapshotOptions struct {
-	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
-	// operation will copy the metadata from the source blob or file to the destination
-	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
-	// is not copied from the source blob or file. Note that beginning with
-	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
-	// Blobs, and Metadata for more information.
-	Metadata map[string]*string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy
-// method.
-type BlobClientDeleteImmutabilityPolicyOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method.
-type BlobClientDeleteOptions struct {
-	// Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob
-	// and all of its snapshots. only: Delete only the blob's snapshots and not the blob
-	// itself
-	DeleteSnapshots *DeleteSnapshotsOptionType
-	// Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled.
-	DeleteType *DeleteType
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
-	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
-	Snapshot *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
-	// It's for service version 2019-10-10 and newer.
-	VersionID *string
-}
-
-// BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method.
-type BlobClientDownloadOptions struct {
-	// Return only the bytes of the blob in the specified range.
-	Range *string
-	// When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the
-	// range is less than or equal to 4 MB in size.
-	RangeGetContentCRC64 *bool
-	// When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the
-	// range is less than or equal to 4 MB in size.
-	RangeGetContentMD5 *bool
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
-	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
-	Snapshot *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
-	// It's for service version 2019-10-10 and newer.
-	VersionID *string
-}
-
-// BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method.
-type BlobClientGetAccountInfoOptions struct {
-	// placeholder for future optional parameters
-}
-
-// BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method.
-type BlobClientGetPropertiesOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
-	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
-	Snapshot *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
-	// It's for service version 2019-10-10 and newer.
-	VersionID *string
-}
-
-// BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method.
-type BlobClientGetTagsOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
-	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
-	Snapshot *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
-	// It's for service version 2019-10-10 and newer.
-	VersionID *string
-}
-
-// BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method.
-type BlobClientQueryOptions struct {
-	// the query request
-	QueryRequest *QueryRequest
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
-	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
-	Snapshot *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method.
-type BlobClientReleaseLeaseOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method.
-type BlobClientRenewLeaseOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method.
-type BlobClientSetExpiryOptions struct {
-	// The time to set the blob to expiry
-	ExpiresOn *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method.
-type BlobClientSetHTTPHeadersOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy method.
-type BlobClientSetImmutabilityPolicyOptions struct {
-	// Specifies the date time when the blobs immutability policy is set to expire.
-	ImmutabilityPolicyExpiry *time.Time
-	// Specifies the immutability policy mode to set on the blob.
-	ImmutabilityPolicyMode *ImmutabilityPolicySetting
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method.
-type BlobClientSetLegalHoldOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method.
-type BlobClientSetMetadataOptions struct {
-	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
-	// operation will copy the metadata from the source blob or file to the destination
-	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
-	// is not copied from the source blob or file. Note that beginning with
-	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
-	// Blobs, and Metadata for more information.
-	Metadata map[string]*string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method.
-type BlobClientSetTagsOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// Specify the transactional crc64 for the body, to be validated by the service.
-	TransactionalContentCRC64 []byte
-	// Specify the transactional md5 for the body, to be validated by the service.
-	TransactionalContentMD5 []byte
-	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
-	// It's for service version 2019-10-10 and newer.
-	VersionID *string
-}
-
-// BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method.
-type BlobClientSetTierOptions struct {
-	// Optional: Indicates the priority with which to rehydrate an archived blob.
-	RehydratePriority *RehydratePriority
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
-	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
-	Snapshot *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
-	// It's for service version 2019-10-10 and newer.
-	VersionID *string
-}
-
-// BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method.
-type BlobClientStartCopyFromURLOptions struct {
-	// Optional. Used to set blob tags in various blob operations.
-	BlobTagsString *string
-	// Specifies the date time when the blobs immutability policy is set to expire.
-	ImmutabilityPolicyExpiry *time.Time
-	// Specifies the immutability policy mode to set on the blob.
-	ImmutabilityPolicyMode *ImmutabilityPolicySetting
-	// Specified if a legal hold should be set on the blob.
-	LegalHold *bool
-	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
-	// operation will copy the metadata from the source blob or file to the destination
-	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
-	// is not copied from the source blob or file. Note that beginning with
-	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
-	// Blobs, and Metadata for more information.
-	Metadata map[string]*string
-	// Optional: Indicates the priority with which to rehydrate an archived blob.
-	RehydratePriority *RehydratePriority
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer.
-	SealBlob *bool
-	// Optional. Indicates the tier to be set on the blob.
-	Tier *AccessTier
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method.
-type BlobClientUndeleteOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
 type BlobFlatListSegment struct {
 	// REQUIRED
 	BlobItems []*BlobItem `xml:"Blob"`
 }
 
-// BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method.
-type BlobHTTPHeaders struct {
-	// Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request.
-	BlobCacheControl *string
-	// Optional. Sets the blob's Content-Disposition header.
-	BlobContentDisposition *string
-	// Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read
-	// request.
-	BlobContentEncoding *string
-	// Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read
-	// request.
-	BlobContentLanguage *string
-	// Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks
-	// were validated when each was uploaded.
-	BlobContentMD5 []byte
-	// Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request.
-	BlobContentType *string
-}
-
 type BlobHierarchyListSegment struct {
 	// REQUIRED
 	BlobItems    []*BlobItem   `xml:"Blob"`
@@ -646,145 +168,6 @@ type Block struct {
 	Size *int64 `xml:"Size"`
 }
 
-// BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList method.
-type BlockBlobClientCommitBlockListOptions struct {
-	// Optional. Used to set blob tags in various blob operations.
-	BlobTagsString *string
-	// Specifies the date time when the blobs immutability policy is set to expire.
-	ImmutabilityPolicyExpiry *time.Time
-	// Specifies the immutability policy mode to set on the blob.
-	ImmutabilityPolicyMode *ImmutabilityPolicySetting
-	// Specified if a legal hold should be set on the blob.
-	LegalHold *bool
-	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
-	// operation will copy the metadata from the source blob or file to the destination
-	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
-	// is not copied from the source blob or file. Note that beginning with
-	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
-	// Blobs, and Metadata for more information.
-	Metadata map[string]*string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// Optional. Indicates the tier to be set on the blob.
-	Tier *AccessTier
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// Specify the transactional crc64 for the body, to be validated by the service.
-	TransactionalContentCRC64 []byte
-	// Specify the transactional md5 for the body, to be validated by the service.
-	TransactionalContentMD5 []byte
-}
-
-// BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method.
-type BlockBlobClientGetBlockListOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
-	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
-	Snapshot *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL method.
-type BlockBlobClientPutBlobFromURLOptions struct {
-	// Optional. Used to set blob tags in various blob operations.
-	BlobTagsString *string
-	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
-	CopySourceAuthorization *string
-	// Optional, default is true. Indicates if properties from the source blob should be copied.
-	CopySourceBlobProperties *bool
-	// Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags.
-	CopySourceTags *BlobCopySourceTags
-	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
-	// operation will copy the metadata from the source blob or file to the destination
-	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
-	// is not copied from the source blob or file. Note that beginning with
-	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
-	// Blobs, and Metadata for more information.
-	Metadata map[string]*string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
-	SourceContentMD5 []byte
-	// Optional. Indicates the tier to be set on the blob.
-	Tier *AccessTier
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// Specify the transactional md5 for the body, to be validated by the service.
-	TransactionalContentMD5 []byte
-}
-
-// BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL method.
-type BlockBlobClientStageBlockFromURLOptions struct {
-	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
-	CopySourceAuthorization *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
-	SourceContentMD5 []byte
-	// Specify the crc64 calculated for the range of bytes that must be read from the copy source.
-	SourceContentcrc64 []byte
-	// Bytes of source data in the specified range.
-	SourceRange *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method.
-type BlockBlobClientStageBlockOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// Specify the transactional crc64 for the body, to be validated by the service.
-	TransactionalContentCRC64 []byte
-	// Specify the transactional md5 for the body, to be validated by the service.
-	TransactionalContentMD5 []byte
-}
-
-// BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method.
-type BlockBlobClientUploadOptions struct {
-	// Optional. Used to set blob tags in various blob operations.
-	BlobTagsString *string
-	// Specifies the date time when the blobs immutability policy is set to expire.
-	ImmutabilityPolicyExpiry *time.Time
-	// Specifies the immutability policy mode to set on the blob.
-	ImmutabilityPolicyMode *ImmutabilityPolicySetting
-	// Specified if a legal hold should be set on the blob.
-	LegalHold *bool
-	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
-	// operation will copy the metadata from the source blob or file to the destination
-	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
-	// is not copied from the source blob or file. Note that beginning with
-	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
-	// Blobs, and Metadata for more information.
-	Metadata map[string]*string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// Optional. Indicates the tier to be set on the blob.
-	Tier *AccessTier
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// Specify the transactional crc64 for the body, to be validated by the service.
-	TransactionalContentCRC64 []byte
-	// Specify the transactional md5 for the body, to be validated by the service.
-	TransactionalContentMD5 []byte
-}
-
 type BlockList struct {
 	CommittedBlocks   []*Block `xml:"CommittedBlocks>Block"`
 	UncommittedBlocks []*Block `xml:"UncommittedBlocks>Block"`
@@ -804,274 +187,6 @@ type ClearRange struct {
 	Start *int64 `xml:"Start"`
 }
 
-// ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method.
-type ContainerClientAcquireLeaseOptions struct {
-	// Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is
-	// not in the correct format. See Guid Constructor (String) for a list of valid GUID
-	// string formats.
-	ProposedLeaseID *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method.
-type ContainerClientBreakLeaseOptions struct {
-	// For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This
-	// break period is only used if it is shorter than the time remaining on the
-	// lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has
-	// expired, but the lease may be held for longer than the break period. If this
-	// header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses,
-	// and an infinite lease breaks immediately.
-	BreakPeriod *int32
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method.
-type ContainerClientChangeLeaseOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method.
-type ContainerClientCreateOptions struct {
-	// Specifies whether data in the container may be accessed publicly and the level of access
-	Access *PublicAccessType
-	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
-	// operation will copy the metadata from the source blob or file to the destination
-	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
-	// is not copied from the source blob or file. Note that beginning with
-	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
-	// Blobs, and Metadata for more information.
-	Metadata map[string]*string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method.
-type ContainerClientDeleteOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method.
-type ContainerClientFilterBlobsOptions struct {
-	// Include this parameter to specify one or more datasets to include in the response.
-	Include []FilterBlobsIncludeItem
-	// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
-	// operation returns the NextMarker value within the response body if the listing
-	// operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
-	// as the value for the marker parameter in a subsequent call to request the next
-	// page of list items. The marker value is opaque to the client.
-	Marker *string
-	// Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
-	// greater than 5000, the server will return up to 5000 items. Note that if the
-	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
-	// of the results. For this reason, it is possible that the service will
-	// return fewer results than specified by maxresults, or than the default of 5000.
-	Maxresults *int32
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy method.
-type ContainerClientGetAccessPolicyOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo method.
-type ContainerClientGetAccountInfoOptions struct {
-	// placeholder for future optional parameters
-}
-
-// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method.
-type ContainerClientGetPropertiesOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager
-// method.
-type ContainerClientListBlobFlatSegmentOptions struct {
-	// Include this parameter to specify one or more datasets to include in the response.
-	Include []ListBlobsIncludeItem
-	// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
-	// operation returns the NextMarker value within the response body if the listing
-	// operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
-	// as the value for the marker parameter in a subsequent call to request the next
-	// page of list items. The marker value is opaque to the client.
-	Marker *string
-	// Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
-	// greater than 5000, the server will return up to 5000 items. Note that if the
-	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
-	// of the results. For this reason, it is possible that the service will
-	// return fewer results than specified by maxresults, or than the default of 5000.
-	Maxresults *int32
-	// Filters the results to return only containers whose name begins with the specified prefix.
-	Prefix *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.NewListBlobHierarchySegmentPager
-// method.
-type ContainerClientListBlobHierarchySegmentOptions struct {
-	// Include this parameter to specify one or more datasets to include in the response.
-	Include []ListBlobsIncludeItem
-	// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
-	// operation returns the NextMarker value within the response body if the listing
-	// operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
-	// as the value for the marker parameter in a subsequent call to request the next
-	// page of list items. The marker value is opaque to the client.
-	Marker *string
-	// Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
-	// greater than 5000, the server will return up to 5000 items. Note that if the
-	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
-	// of the results. For this reason, it is possible that the service will
-	// return fewer results than specified by maxresults, or than the default of 5000.
-	Maxresults *int32
-	// Filters the results to return only containers whose name begins with the specified prefix.
-	Prefix *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method.
-type ContainerClientReleaseLeaseOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method.
-type ContainerClientRenameOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// A lease ID for the source path. If specified, the source path must have an active lease and the lease ID must match.
-	SourceLeaseID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method.
-type ContainerClientRenewLeaseOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method.
-type ContainerClientRestoreOptions struct {
-	// Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore.
-	DeletedContainerName *string
-	// Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore.
-	DeletedContainerVersion *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy method.
-type ContainerClientSetAccessPolicyOptions struct {
-	// Specifies whether data in the container may be accessed publicly and the level of access
-	Access *PublicAccessType
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method.
-type ContainerClientSetMetadataOptions struct {
-	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
-	// operation will copy the metadata from the source blob or file to the destination
-	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
-	// is not copied from the source blob or file. Note that beginning with
-	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
-	// Blobs, and Metadata for more information.
-	Metadata map[string]*string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method.
-type ContainerClientSubmitBatchOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method.
-type ContainerCPKScopeInfo struct {
-	// Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all
-	// future writes.
-	DefaultEncryptionScope *string
-	// Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than
-	// the scope set on the container.
-	PreventEncryptionScopeOverride *bool
-}
-
 // ContainerItem - An Azure Storage container
 type ContainerItem struct {
 	// REQUIRED
@@ -1133,27 +248,6 @@ type CORSRule struct {
 	MaxAgeInSeconds *int32 `xml:"MaxAgeInSeconds"`
 }
 
-// CPKInfo contains a group of parameters for the BlobClient.Download method.
-type CPKInfo struct {
-	// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
-	// if the x-ms-encryption-key header is provided.
-	EncryptionAlgorithm *EncryptionAlgorithmType
-	// Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption
-	// is performed with the root account encryption key. For more information, see
-	// Encryption at Rest for Azure Storage Services.
-	EncryptionKey *string
-	// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided.
-	EncryptionKeySHA256 *string
-}
-
-// CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
-type CPKScopeInfo struct {
-	// Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided
-	// in the request. If not specified, encryption is performed with the default
-	// account encryption scope. For more information, see Encryption at Rest for Azure Storage Services.
-	EncryptionScope *string
-}
-
 // DelimitedTextConfiguration - Groups the settings used for interpreting the blob data if the blob is delimited text formatted.
 type DelimitedTextConfiguration struct {
 	// The string used to separate columns.
@@ -1225,12 +319,6 @@ type KeyInfo struct {
 	Start *string `xml:"Start"`
 }
 
-// LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
-type LeaseAccessConditions struct {
-	// If specified, the operation only succeeds if the resource's lease is active and matches this ID.
-	LeaseID *string
-}
-
 // ListBlobsFlatSegmentResponse - An enumeration of blobs
 type ListBlobsFlatSegmentResponse struct {
 	// REQUIRED
@@ -1310,195 +398,6 @@ type Metrics struct {
 	Version *string `xml:"Version"`
 }
 
-// ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
-type ModifiedAccessConditions struct {
-	// Specify an ETag value to operate only on blobs with a matching value.
-	IfMatch *azcore.ETag
-	// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
-	IfModifiedSince *time.Time
-	// Specify an ETag value to operate only on blobs without a matching value.
-	IfNoneMatch *azcore.ETag
-	// Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
-	IfTags *string
-	// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
-	IfUnmodifiedSince *time.Time
-}
-
-// PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method.
-type PageBlobClientClearPagesOptions struct {
-	// Return only the bytes of the blob in the specified range.
-	Range *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental method.
-type PageBlobClientCopyIncrementalOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method.
-type PageBlobClientCreateOptions struct {
-	// Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of
-	// the sequence number must be between 0 and 2^63 - 1.
-	BlobSequenceNumber *int64
-	// Optional. Used to set blob tags in various blob operations.
-	BlobTagsString *string
-	// Specifies the date time when the blobs immutability policy is set to expire.
-	ImmutabilityPolicyExpiry *time.Time
-	// Specifies the immutability policy mode to set on the blob.
-	ImmutabilityPolicyMode *ImmutabilityPolicySetting
-	// Specified if a legal hold should be set on the blob.
-	LegalHold *bool
-	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
-	// operation will copy the metadata from the source blob or file to the destination
-	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
-	// is not copied from the source blob or file. Note that beginning with
-	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
-	// Blobs, and Metadata for more information.
-	Metadata map[string]*string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// Optional. Indicates the tier to be set on the page blob.
-	Tier *PremiumPageBlobAccessTier
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager
-// method.
-type PageBlobClientGetPageRangesDiffOptions struct {
-	// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
-	// operation returns the NextMarker value within the response body if the listing
-	// operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
-	// as the value for the marker parameter in a subsequent call to request the next
-	// page of list items. The marker value is opaque to the client.
-	Marker *string
-	// Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
-	// greater than 5000, the server will return up to 5000 items. Note that if the
-	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
-	// of the results. For this reason, it is possible that the service will
-	// return fewer results than specified by maxresults, or than the default of 5000.
-	Maxresults *int32
-	// Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot
-	// of the target blob. The response will only contain pages that were changed
-	// between the target blob and its previous snapshot.
-	PrevSnapshotURL *string
-	// Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response
-	// will contain only pages that were changed between target blob and previous
-	// snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot
-	// specified by prevsnapshot is the older of the two. Note that incremental
-	// snapshots are currently supported only for blobs created on or after January 1, 2016.
-	Prevsnapshot *string
-	// Return only the bytes of the blob in the specified range.
-	Range *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
-	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
-	Snapshot *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager method.
-type PageBlobClientGetPageRangesOptions struct {
-	// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
-	// operation returns the NextMarker value within the response body if the listing
-	// operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
-	// as the value for the marker parameter in a subsequent call to request the next
-	// page of list items. The marker value is opaque to the client.
-	Marker *string
-	// Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
-	// greater than 5000, the server will return up to 5000 items. Note that if the
-	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
-	// of the results. For this reason, it is possible that the service will
-	// return fewer results than specified by maxresults, or than the default of 5000.
-	Maxresults *int32
-	// Return only the bytes of the blob in the specified range.
-	Range *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
-	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
-	Snapshot *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method.
-type PageBlobClientResizeOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber
-// method.
-type PageBlobClientUpdateSequenceNumberOptions struct {
-	// Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of
-	// the sequence number must be between 0 and 2^63 - 1.
-	BlobSequenceNumber *int64
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL method.
-type PageBlobClientUploadPagesFromURLOptions struct {
-	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
-	CopySourceAuthorization *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
-	SourceContentMD5 []byte
-	// Specify the crc64 calculated for the range of bytes that must be read from the copy source.
-	SourceContentcrc64 []byte
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method.
-type PageBlobClientUploadPagesOptions struct {
-	// Return only the bytes of the blob in the specified range.
-	Range *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-	// Specify the transactional crc64 for the body, to be validated by the service.
-	TransactionalContentCRC64 []byte
-	// Specify the transactional md5 for the body, to be validated by the service.
-	TransactionalContentMD5 []byte
-}
-
 // PageList - the list of pages
 type PageList struct {
 	ClearRange []*ClearRange `xml:"ClearRange"`
@@ -1561,122 +460,6 @@ type RetentionPolicy struct {
 	Days *int32 `xml:"Days"`
 }
 
-// SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages method.
-type SequenceNumberAccessConditions struct {
-	// Specify this header value to operate only on a blob if it has the specified sequence number.
-	IfSequenceNumberEqualTo *int64
-	// Specify this header value to operate only on a blob if it has a sequence number less than the specified.
-	IfSequenceNumberLessThan *int64
-	// Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified.
-	IfSequenceNumberLessThanOrEqualTo *int64
-}
-
-// ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method.
-type ServiceClientFilterBlobsOptions struct {
-	// Include this parameter to specify one or more datasets to include in the response.
-	Include []FilterBlobsIncludeItem
-	// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
-	// operation returns the NextMarker value within the response body if the listing
-	// operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
-	// as the value for the marker parameter in a subsequent call to request the next
-	// page of list items. The marker value is opaque to the client.
-	Marker *string
-	// Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
-	// greater than 5000, the server will return up to 5000 items. Note that if the
-	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
-	// of the results. For this reason, it is possible that the service will
-	// return fewer results than specified by maxresults, or than the default of 5000.
-	Maxresults *int32
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method.
-type ServiceClientGetAccountInfoOptions struct {
-	// placeholder for future optional parameters
-}
-
-// ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method.
-type ServiceClientGetPropertiesOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method.
-type ServiceClientGetStatisticsOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey method.
-type ServiceClientGetUserDelegationKeyOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager
-// method.
-type ServiceClientListContainersSegmentOptions struct {
-	// Include this parameter to specify that the container's metadata be returned as part of the response body.
-	Include []ListContainersIncludeType
-	// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
-	// operation returns the NextMarker value within the response body if the listing
-	// operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
-	// as the value for the marker parameter in a subsequent call to request the next
-	// page of list items. The marker value is opaque to the client.
-	Marker *string
-	// Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
-	// greater than 5000, the server will return up to 5000 items. Note that if the
-	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
-	// of the results. For this reason, it is possible that the service will
-	// return fewer results than specified by maxresults, or than the default of 5000.
-	Maxresults *int32
-	// Filters the results to return only containers whose name begins with the specified prefix.
-	Prefix *string
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method.
-type ServiceClientSetPropertiesOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
-// ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method.
-type ServiceClientSubmitBatchOptions struct {
-	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
-	// analytics logging is enabled.
-	RequestID *string
-	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
-	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
-	Timeout *int32
-}
-
 // SignedIdentifier - signed identifier
 type SignedIdentifier struct {
 	// REQUIRED; An Access policy
@@ -1686,20 +469,6 @@ type SignedIdentifier struct {
 	ID *string `xml:"Id"`
 }
 
-// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method.
-type SourceModifiedAccessConditions struct {
-	// Specify an ETag value to operate only on blobs with a matching value.
-	SourceIfMatch *azcore.ETag
-	// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
-	SourceIfModifiedSince *time.Time
-	// Specify an ETag value to operate only on blobs without a matching value.
-	SourceIfNoneMatch *azcore.ETag
-	// Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
-	SourceIfTags *string
-	// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
-	SourceIfUnmodifiedSince *time.Time
-}
-
 // StaticWebsite - The properties that enable an account to host a static website
 type StaticWebsite struct {
 	// REQUIRED; Indicates whether this account is hosting a static website
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go
index dc5dba1037..7e094db873 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go
@@ -3,9 +3,8 @@
 
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License. See License.txt in the project root for license information.
-// Code generated by Microsoft (R) AutoRest Code Generator.
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
 // Changes may cause incorrect behavior and will be lost if the code is regenerated.
-// DO NOT EDIT.
 
 package generated
 
@@ -24,12 +23,12 @@ func (a AccessPolicy) MarshalXML(enc *xml.Encoder, start xml.StartElement) error
 	type alias AccessPolicy
 	aux := &struct {
 		*alias
-		Expiry *timeRFC3339 `xml:"Expiry"`
-		Start  *timeRFC3339 `xml:"Start"`
+		Expiry *dateTimeRFC3339 `xml:"Expiry"`
+		Start  *dateTimeRFC3339 `xml:"Start"`
 	}{
 		alias:  (*alias)(&a),
-		Expiry: (*timeRFC3339)(a.Expiry),
-		Start:  (*timeRFC3339)(a.Start),
+		Expiry: (*dateTimeRFC3339)(a.Expiry),
+		Start:  (*dateTimeRFC3339)(a.Start),
 	}
 	return enc.EncodeElement(aux, start)
 }
@@ -39,8 +38,8 @@ func (a *AccessPolicy) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) er
 	type alias AccessPolicy
 	aux := &struct {
 		*alias
-		Expiry *timeRFC3339 `xml:"Expiry"`
-		Start  *timeRFC3339 `xml:"Start"`
+		Expiry *dateTimeRFC3339 `xml:"Expiry"`
+		Start  *dateTimeRFC3339 `xml:"Start"`
 	}{
 		alias: (*alias)(a),
 	}
@@ -106,25 +105,25 @@ func (b BlobProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) err
 	type alias BlobProperties
 	aux := &struct {
 		*alias
-		AccessTierChangeTime        *timeRFC1123 `xml:"AccessTierChangeTime"`
-		ContentMD5                  *string      `xml:"Content-MD5"`
-		CopyCompletionTime          *timeRFC1123 `xml:"CopyCompletionTime"`
-		CreationTime                *timeRFC1123 `xml:"Creation-Time"`
-		DeletedTime                 *timeRFC1123 `xml:"DeletedTime"`
-		ExpiresOn                   *timeRFC1123 `xml:"Expiry-Time"`
-		ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"`
-		LastAccessedOn              *timeRFC1123 `xml:"LastAccessTime"`
-		LastModified                *timeRFC1123 `xml:"Last-Modified"`
+		AccessTierChangeTime        *dateTimeRFC1123 `xml:"AccessTierChangeTime"`
+		ContentMD5                  *string          `xml:"Content-MD5"`
+		CopyCompletionTime          *dateTimeRFC1123 `xml:"CopyCompletionTime"`
+		CreationTime                *dateTimeRFC1123 `xml:"Creation-Time"`
+		DeletedTime                 *dateTimeRFC1123 `xml:"DeletedTime"`
+		ExpiresOn                   *dateTimeRFC1123 `xml:"Expiry-Time"`
+		ImmutabilityPolicyExpiresOn *dateTimeRFC1123 `xml:"ImmutabilityPolicyUntilDate"`
+		LastAccessedOn              *dateTimeRFC1123 `xml:"LastAccessTime"`
+		LastModified                *dateTimeRFC1123 `xml:"Last-Modified"`
 	}{
 		alias:                       (*alias)(&b),
-		AccessTierChangeTime:        (*timeRFC1123)(b.AccessTierChangeTime),
-		CopyCompletionTime:          (*timeRFC1123)(b.CopyCompletionTime),
-		CreationTime:                (*timeRFC1123)(b.CreationTime),
-		DeletedTime:                 (*timeRFC1123)(b.DeletedTime),
-		ExpiresOn:                   (*timeRFC1123)(b.ExpiresOn),
-		ImmutabilityPolicyExpiresOn: (*timeRFC1123)(b.ImmutabilityPolicyExpiresOn),
-		LastAccessedOn:              (*timeRFC1123)(b.LastAccessedOn),
-		LastModified:                (*timeRFC1123)(b.LastModified),
+		AccessTierChangeTime:        (*dateTimeRFC1123)(b.AccessTierChangeTime),
+		CopyCompletionTime:          (*dateTimeRFC1123)(b.CopyCompletionTime),
+		CreationTime:                (*dateTimeRFC1123)(b.CreationTime),
+		DeletedTime:                 (*dateTimeRFC1123)(b.DeletedTime),
+		ExpiresOn:                   (*dateTimeRFC1123)(b.ExpiresOn),
+		ImmutabilityPolicyExpiresOn: (*dateTimeRFC1123)(b.ImmutabilityPolicyExpiresOn),
+		LastAccessedOn:              (*dateTimeRFC1123)(b.LastAccessedOn),
+		LastModified:                (*dateTimeRFC1123)(b.LastModified),
 	}
 	if b.ContentMD5 != nil {
 		encodedContentMD5 := runtime.EncodeByteArray(b.ContentMD5, runtime.Base64StdFormat)
@@ -138,15 +137,15 @@ func (b *BlobProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElement)
 	type alias BlobProperties
 	aux := &struct {
 		*alias
-		AccessTierChangeTime        *timeRFC1123 `xml:"AccessTierChangeTime"`
-		ContentMD5                  *string      `xml:"Content-MD5"`
-		CopyCompletionTime          *timeRFC1123 `xml:"CopyCompletionTime"`
-		CreationTime                *timeRFC1123 `xml:"Creation-Time"`
-		DeletedTime                 *timeRFC1123 `xml:"DeletedTime"`
-		ExpiresOn                   *timeRFC1123 `xml:"Expiry-Time"`
-		ImmutabilityPolicyExpiresOn *timeRFC1123 `xml:"ImmutabilityPolicyUntilDate"`
-		LastAccessedOn              *timeRFC1123 `xml:"LastAccessTime"`
-		LastModified                *timeRFC1123 `xml:"Last-Modified"`
+		AccessTierChangeTime        *dateTimeRFC1123 `xml:"AccessTierChangeTime"`
+		ContentMD5                  *string          `xml:"Content-MD5"`
+		CopyCompletionTime          *dateTimeRFC1123 `xml:"CopyCompletionTime"`
+		CreationTime                *dateTimeRFC1123 `xml:"Creation-Time"`
+		DeletedTime                 *dateTimeRFC1123 `xml:"DeletedTime"`
+		ExpiresOn                   *dateTimeRFC1123 `xml:"Expiry-Time"`
+		ImmutabilityPolicyExpiresOn *dateTimeRFC1123 `xml:"ImmutabilityPolicyUntilDate"`
+		LastAccessedOn              *dateTimeRFC1123 `xml:"LastAccessTime"`
+		LastModified                *dateTimeRFC1123 `xml:"Last-Modified"`
 	}{
 		alias: (*alias)(b),
 	}
@@ -249,12 +248,12 @@ func (c ContainerProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement
 	type alias ContainerProperties
 	aux := &struct {
 		*alias
-		DeletedTime  *timeRFC1123 `xml:"DeletedTime"`
-		LastModified *timeRFC1123 `xml:"Last-Modified"`
+		DeletedTime  *dateTimeRFC1123 `xml:"DeletedTime"`
+		LastModified *dateTimeRFC1123 `xml:"Last-Modified"`
 	}{
 		alias:        (*alias)(&c),
-		DeletedTime:  (*timeRFC1123)(c.DeletedTime),
-		LastModified: (*timeRFC1123)(c.LastModified),
+		DeletedTime:  (*dateTimeRFC1123)(c.DeletedTime),
+		LastModified: (*dateTimeRFC1123)(c.LastModified),
 	}
 	return enc.EncodeElement(aux, start)
 }
@@ -264,8 +263,8 @@ func (c *ContainerProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElem
 	type alias ContainerProperties
 	aux := &struct {
 		*alias
-		DeletedTime  *timeRFC1123 `xml:"DeletedTime"`
-		LastModified *timeRFC1123 `xml:"Last-Modified"`
+		DeletedTime  *dateTimeRFC1123 `xml:"DeletedTime"`
+		LastModified *dateTimeRFC1123 `xml:"Last-Modified"`
 	}{
 		alias: (*alias)(c),
 	}
@@ -297,10 +296,10 @@ func (g GeoReplication) MarshalXML(enc *xml.Encoder, start xml.StartElement) err
 	type alias GeoReplication
 	aux := &struct {
 		*alias
-		LastSyncTime *timeRFC1123 `xml:"LastSyncTime"`
+		LastSyncTime *dateTimeRFC1123 `xml:"LastSyncTime"`
 	}{
 		alias:        (*alias)(&g),
-		LastSyncTime: (*timeRFC1123)(g.LastSyncTime),
+		LastSyncTime: (*dateTimeRFC1123)(g.LastSyncTime),
 	}
 	return enc.EncodeElement(aux, start)
 }
@@ -310,7 +309,7 @@ func (g *GeoReplication) UnmarshalXML(dec *xml.Decoder, start xml.StartElement)
 	type alias GeoReplication
 	aux := &struct {
 		*alias
-		LastSyncTime *timeRFC1123 `xml:"LastSyncTime"`
+		LastSyncTime *dateTimeRFC1123 `xml:"LastSyncTime"`
 	}{
 		alias: (*alias)(g),
 	}
@@ -414,12 +413,12 @@ func (u UserDelegationKey) MarshalXML(enc *xml.Encoder, start xml.StartElement)
 	type alias UserDelegationKey
 	aux := &struct {
 		*alias
-		SignedExpiry *timeRFC3339 `xml:"SignedExpiry"`
-		SignedStart  *timeRFC3339 `xml:"SignedStart"`
+		SignedExpiry *dateTimeRFC3339 `xml:"SignedExpiry"`
+		SignedStart  *dateTimeRFC3339 `xml:"SignedStart"`
 	}{
 		alias:        (*alias)(&u),
-		SignedExpiry: (*timeRFC3339)(u.SignedExpiry),
-		SignedStart:  (*timeRFC3339)(u.SignedStart),
+		SignedExpiry: (*dateTimeRFC3339)(u.SignedExpiry),
+		SignedStart:  (*dateTimeRFC3339)(u.SignedStart),
 	}
 	return enc.EncodeElement(aux, start)
 }
@@ -429,8 +428,8 @@ func (u *UserDelegationKey) UnmarshalXML(dec *xml.Decoder, start xml.StartElemen
 	type alias UserDelegationKey
 	aux := &struct {
 		*alias
-		SignedExpiry *timeRFC3339 `xml:"SignedExpiry"`
-		SignedStart  *timeRFC3339 `xml:"SignedStart"`
+		SignedExpiry *dateTimeRFC3339 `xml:"SignedExpiry"`
+		SignedStart  *dateTimeRFC3339 `xml:"SignedStart"`
 	}{
 		alias: (*alias)(u),
 	}
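
The serde hunks above only swap the internal time wrapper types (timeRFC1123/timeRFC3339 become dateTimeRFC1123/dateTimeRFC3339); the alias-based MarshalXML/UnmarshalXML pattern itself is unchanged. As a self-contained sketch of that pattern, using a hypothetical rfc1123Time wrapper and Properties struct rather than the SDK's internal types:

package main

import (
	"encoding/xml"
	"fmt"
	"time"
)

// rfc1123Time is a hypothetical stand-in for the SDK's dateTimeRFC1123 wrapper:
// it changes how a time.Time is encoded to XML without changing the field's
// type in the public struct.
type rfc1123Time time.Time

func (t rfc1123Time) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
	return enc.EncodeElement(time.Time(t).UTC().Format(time.RFC1123), start)
}

// Properties is an illustrative struct with one time-typed field.
type Properties struct {
	Name         *string    `xml:"Name"`
	LastModified *time.Time `xml:"Last-Modified"`
}

// MarshalXML uses the same trick as the generated code: alias the struct to drop
// its methods (avoiding infinite recursion), embed the alias, and shadow the time
// field with the wrapper type so the shallower field wins during encoding.
func (p Properties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
	type alias Properties
	aux := &struct {
		*alias
		LastModified *rfc1123Time `xml:"Last-Modified"`
	}{
		alias:        (*alias)(&p),
		LastModified: (*rfc1123Time)(p.LastModified),
	}
	return enc.EncodeElement(aux, start)
}

func main() {
	name := "example"
	now := time.Now()
	out, err := xml.Marshal(Properties{Name: &name, LastModified: &now})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
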
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go
new file mode 100644
index 0000000000..216f8b73ae
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go
@@ -0,0 +1,1469 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+package generated
+
+import (
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+	"time"
+)
+
+// AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL
+// method.
+type AppendBlobClientAppendBlockFromURLOptions struct {
+	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+	CopySourceAuthorization *string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
+	SourceContentMD5 []byte
+
+	// Specify the crc64 calculated for the range of bytes that must be read from the copy source.
+	SourceContentcrc64 []byte
+
+	// Bytes of source data in the specified range.
+	SourceRange *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// Specify the transactional md5 for the body, to be validated by the service.
+	TransactionalContentMD5 []byte
+}
+
+// AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method.
+type AppendBlobClientAppendBlockOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// Specify the transactional crc64 for the body, to be validated by the service.
+	TransactionalContentCRC64 []byte
+
+	// Specify the transactional md5 for the body, to be validated by the service.
+	TransactionalContentMD5 []byte
+}
+
+// AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method.
+type AppendBlobClientCreateOptions struct {
+	// Optional. Used to set blob tags in various blob operations.
+	BlobTagsString *string
+
+	// Specifies the date time when the blobs immutability policy is set to expire.
+	ImmutabilityPolicyExpiry *time.Time
+
+	// Specifies the immutability policy mode to set on the blob.
+	ImmutabilityPolicyMode *ImmutabilityPolicySetting
+
+	// Specified if a legal hold should be set on the blob.
+	LegalHold *bool
+
+	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+	// operation will copy the metadata from the source blob or file to the destination
+	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+	// is not copied from the source blob or file. Note that beginning with
+	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+	// Blobs, and Metadata for more information.
+	Metadata map[string]*string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method.
+type AppendBlobClientSealOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// AppendPositionAccessConditions contains a group of parameters for the AppendBlobClient.AppendBlock method.
+type AppendPositionAccessConditions struct {
+	// Optional conditional header, used only for the Append Block operation. A number indicating the byte offset to compare.
+	// Append Block will succeed only if the append position is equal to this number. If
+	// it is not, the request will fail with the AppendPositionConditionNotMet error (HTTP status code 412 - Precondition Failed).
+	AppendPosition *int64
+
+	// Optional conditional header. The max length in bytes permitted for the append blob. If the Append Block operation would
+	// cause the blob to exceed that limit or if the blob size is already greater than
+	// the value specified in this header, the request will fail with MaxBlobSizeConditionNotMet error (HTTP status code 412 -
+	// Precondition Failed).
+	MaxSize *int64
+}
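
Both fields above are server-enforced preconditions: a mismatch fails the Append Block call with HTTP 412 (AppendPositionConditionNotMet / MaxBlobSizeConditionNotMet). A minimal sketch of populating them, assuming it compiles alongside these generated types (the package is SDK-internal, so this is illustrative only):

// appendConditionsSketch appends only when the current append offset is exactly
// 1 MiB and the blob would not grow beyond 4 MiB.
func appendConditionsSketch() *AppendPositionAccessConditions {
	appendPos := int64(1 << 20) // expected byte offset of this block
	maxSize := int64(4 << 20)   // hard cap on the resulting blob size
	return &AppendPositionAccessConditions{
		AppendPosition: &appendPos,
		MaxSize:        &maxSize,
	}
}
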
+
+// BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method.
+type BlobClientAbortCopyFromURLOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientAcquireLeaseOptions contains the optional parameters for the BlobClient.AcquireLease method.
+type BlobClientAcquireLeaseOptions struct {
+	// Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is
+	// not in the correct format. See Guid Constructor (String) for a list of valid GUID
+	// string formats.
+	ProposedLeaseID *string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method.
+type BlobClientBreakLeaseOptions struct {
+	// For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This
+	// break period is only used if it is shorter than the time remaining on the
+	// lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has
+	// expired, but the lease may be held for longer than the break period. If this
+	// header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses,
+	// and an infinite lease breaks immediately.
+	BreakPeriod *int32
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
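
The BreakPeriod rule above reduces to "use the requested period only when it is shorter than what is left on the lease". A tiny self-contained helper that restates the documented behavior (illustration only, not SDK code):

// effectiveBreakSeconds returns the break period the service would apply, per the
// documented rule: the proposed period is honored only if it is shorter than the
// time remaining on the lease; otherwise the remaining time is used.
func effectiveBreakSeconds(proposed, remaining int32) int32 {
	if proposed < remaining {
		return proposed
	}
	return remaining
}
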
+
+// BlobClientChangeLeaseOptions contains the optional parameters for the BlobClient.ChangeLease method.
+type BlobClientChangeLeaseOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientCopyFromURLOptions contains the optional parameters for the BlobClient.CopyFromURL method.
+type BlobClientCopyFromURLOptions struct {
+	// Optional. Used to set blob tags in various blob operations.
+	BlobTagsString *string
+
+	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+	CopySourceAuthorization *string
+
+	// Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags.
+	CopySourceTags *BlobCopySourceTags
+
+	// Specifies the date time when the blobs immutability policy is set to expire.
+	ImmutabilityPolicyExpiry *time.Time
+
+	// Specifies the immutability policy mode to set on the blob.
+	ImmutabilityPolicyMode *ImmutabilityPolicySetting
+
+	// Specified if a legal hold should be set on the blob.
+	LegalHold *bool
+
+	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+	// operation will copy the metadata from the source blob or file to the destination
+	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+	// is not copied from the source blob or file. Note that beginning with
+	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+	// Blobs, and Metadata for more information.
+	Metadata map[string]*string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
+	SourceContentMD5 []byte
+
+	// Optional. Indicates the tier to be set on the blob.
+	Tier *AccessTier
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method.
+type BlobClientCreateSnapshotOptions struct {
+	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+	// operation will copy the metadata from the source blob or file to the destination
+	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+	// is not copied from the source blob or file. Note that beginning with
+	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+	// Blobs, and Metadata for more information.
+	Metadata map[string]*string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy
+// method.
+type BlobClientDeleteImmutabilityPolicyOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method.
+type BlobClientDeleteOptions struct {
+	// Required if the blob has associated snapshots. Specify one of the following two options: include: Delete the base blob
+	// and all of its snapshots. only: Delete only the blob's snapshots and not the blob
+	// itself
+	DeleteSnapshots *DeleteSnapshotsOptionType
+
+	// Optional. Only possible value is 'permanent', which specifies to permanently delete a blob if blob soft delete is enabled.
+	DeleteType *DeleteType
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+	Snapshot *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+	// It's for service version 2019-10-10 and newer.
+	VersionID *string
+}
+
+// BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method.
+type BlobClientDownloadOptions struct {
+	// Return only the bytes of the blob in the specified range.
+	Range *string
+
+	// When set to true and specified together with the Range, the service returns the CRC64 hash for the range, as long as the
+	// range is less than or equal to 4 MB in size.
+	RangeGetContentCRC64 *bool
+
+	// When set to true and specified together with the Range, the service returns the MD5 hash for the range, as long as the
+	// range is less than or equal to 4 MB in size.
+	RangeGetContentMD5 *bool
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+	Snapshot *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+	// It's for service version 2019-10-10 and newer.
+	VersionID *string
+}
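
A minimal sketch of a ranged download built from these options, assuming it compiles alongside the generated types (SDK-internal package); the Range value uses standard HTTP byte-range syntax, and the per-range MD5/CRC64 is only returned for ranges of at most 4 MiB:

// rangedDownloadSketch requests the first 4 MiB of a blob and asks the service to
// include an MD5 hash for that range in the response.
func rangedDownloadSketch() *BlobClientDownloadOptions {
	byteRange := "bytes=0-4194303" // first 4 MiB, both ends inclusive
	withMD5 := true
	return &BlobClientDownloadOptions{
		Range:              &byteRange,
		RangeGetContentMD5: &withMD5,
	}
}
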
+
+// BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method.
+type BlobClientGetAccountInfoOptions struct {
+	// placeholder for future optional parameters
+}
+
+// BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method.
+type BlobClientGetPropertiesOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+	Snapshot *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+	// It's for service version 2019-10-10 and newer.
+	VersionID *string
+}
+
+// BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method.
+type BlobClientGetTagsOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+	Snapshot *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+	// It's for service version 2019-10-10 and newer.
+	VersionID *string
+}
+
+// BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method.
+type BlobClientQueryOptions struct {
+	// the query request
+	QueryRequest *QueryRequest
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+	Snapshot *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method.
+type BlobClientReleaseLeaseOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method.
+type BlobClientRenewLeaseOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method.
+type BlobClientSetExpiryOptions struct {
+	// The time to set the blob to expiry
+	ExpiresOn *string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method.
+type BlobClientSetHTTPHeadersOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy method.
+type BlobClientSetImmutabilityPolicyOptions struct {
+	// Specifies the date time when the blobs immutability policy is set to expire.
+	ImmutabilityPolicyExpiry *time.Time
+
+	// Specifies the immutability policy mode to set on the blob.
+	ImmutabilityPolicyMode *ImmutabilityPolicySetting
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method.
+type BlobClientSetLegalHoldOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method.
+type BlobClientSetMetadataOptions struct {
+	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+	// operation will copy the metadata from the source blob or file to the destination
+	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+	// is not copied from the source blob or file. Note that beginning with
+	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+	// Blobs, and Metadata for more information.
+	Metadata map[string]*string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method.
+type BlobClientSetTagsOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// Specify the transactional crc64 for the body, to be validated by the service.
+	TransactionalContentCRC64 []byte
+
+	// Specify the transactional md5 for the body, to be validated by the service.
+	TransactionalContentMD5 []byte
+
+	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+	// It's for service version 2019-10-10 and newer.
+	VersionID *string
+}
+
+// BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method.
+type BlobClientSetTierOptions struct {
+	// Optional: Indicates the priority with which to rehydrate an archived blob.
+	RehydratePriority *RehydratePriority
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+	Snapshot *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+	// It's for service version 2019-10-10 and newer.
+	VersionID *string
+}
+
+// BlobClientStartCopyFromURLOptions contains the optional parameters for the BlobClient.StartCopyFromURL method.
+type BlobClientStartCopyFromURLOptions struct {
+	// Optional. Used to set blob tags in various blob operations.
+	BlobTagsString *string
+
+	// Specifies the date time when the blobs immutability policy is set to expire.
+	ImmutabilityPolicyExpiry *time.Time
+
+	// Specifies the immutability policy mode to set on the blob.
+	ImmutabilityPolicyMode *ImmutabilityPolicySetting
+
+	// Specified if a legal hold should be set on the blob.
+	LegalHold *bool
+
+	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+	// operation will copy the metadata from the source blob or file to the destination
+	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+	// is not copied from the source blob or file. Note that beginning with
+	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+	// Blobs, and Metadata for more information.
+	Metadata map[string]*string
+
+	// Optional: Indicates the priority with which to rehydrate an archived blob.
+	RehydratePriority *RehydratePriority
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// Overrides the sealed state of the destination blob. Service version 2019-12-12 and newer.
+	SealBlob *bool
+
+	// Optional. Indicates the tier to be set on the blob.
+	Tier *AccessTier
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method.
+type BlobClientUndeleteOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method.
+type BlobHTTPHeaders struct {
+	// Optional. Sets the blob's cache control. If specified, this property is stored with the blob and returned with a read request.
+	BlobCacheControl *string
+
+	// Optional. Sets the blob's Content-Disposition header.
+	BlobContentDisposition *string
+
+	// Optional. Sets the blob's content encoding. If specified, this property is stored with the blob and returned with a read
+	// request.
+	BlobContentEncoding *string
+
+	// Optional. Set the blob's content language. If specified, this property is stored with the blob and returned with a read
+	// request.
+	BlobContentLanguage *string
+
+	// Optional. An MD5 hash of the blob content. Note that this hash is not validated, as the hashes for the individual blocks
+	// were validated when each was uploaded.
+	BlobContentMD5 []byte
+
+	// Optional. Sets the blob's content type. If specified, this property is stored with the blob and returned with a read request.
+	BlobContentType *string
+}
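
A short sketch of populating BlobHTTPHeaders for a compressed text blob, to be passed to BlobClient.SetHTTPHeaders or a blob-creating operation (assumes it compiles alongside the generated types; values are illustrative):

// textBlobHeadersSketch sets the standard HTTP headers stored with the blob and
// echoed back on read requests.
func textBlobHeadersSketch() BlobHTTPHeaders {
	contentType := "text/plain; charset=utf-8"
	contentEncoding := "gzip"
	cacheControl := "max-age=3600"
	return BlobHTTPHeaders{
		BlobContentType:     &contentType,
		BlobContentEncoding: &contentEncoding,
		BlobCacheControl:    &cacheControl,
	}
}
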
+
+// BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList method.
+type BlockBlobClientCommitBlockListOptions struct {
+	// Optional. Used to set blob tags in various blob operations.
+	BlobTagsString *string
+
+	// Specifies the date time when the blobs immutability policy is set to expire.
+	ImmutabilityPolicyExpiry *time.Time
+
+	// Specifies the immutability policy mode to set on the blob.
+	ImmutabilityPolicyMode *ImmutabilityPolicySetting
+
+	// Specified if a legal hold should be set on the blob.
+	LegalHold *bool
+
+	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+	// operation will copy the metadata from the source blob or file to the destination
+	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+	// is not copied from the source blob or file. Note that beginning with
+	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+	// Blobs, and Metadata for more information.
+	Metadata map[string]*string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// Optional. Indicates the tier to be set on the blob.
+	Tier *AccessTier
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// Specify the transactional crc64 for the body, to be validated by the service.
+	TransactionalContentCRC64 []byte
+
+	// Specify the transactional md5 for the body, to be validated by the service.
+	TransactionalContentMD5 []byte
+}
+
+// BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method.
+type BlockBlobClientGetBlockListOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+	Snapshot *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlockBlobClientPutBlobFromURLOptions contains the optional parameters for the BlockBlobClient.PutBlobFromURL method.
+type BlockBlobClientPutBlobFromURLOptions struct {
+	// Optional. Used to set blob tags in various blob operations.
+	BlobTagsString *string
+
+	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+	CopySourceAuthorization *string
+
+	// Optional, default is true. Indicates if properties from the source blob should be copied.
+	CopySourceBlobProperties *bool
+
+	// Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags.
+	CopySourceTags *BlobCopySourceTags
+
+	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+	// operation will copy the metadata from the source blob or file to the destination
+	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+	// is not copied from the source blob or file. Note that beginning with
+	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+	// Blobs, and Metadata for more information.
+	Metadata map[string]*string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
+	SourceContentMD5 []byte
+
+	// Optional. Indicates the tier to be set on the blob.
+	Tier *AccessTier
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// Specify the transactional md5 for the body, to be validated by the service.
+	TransactionalContentMD5 []byte
+}
+
+// BlockBlobClientStageBlockFromURLOptions contains the optional parameters for the BlockBlobClient.StageBlockFromURL method.
+type BlockBlobClientStageBlockFromURLOptions struct {
+	// Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source.
+	CopySourceAuthorization *string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
+	SourceContentMD5 []byte
+
+	// Specify the crc64 calculated for the range of bytes that must be read from the copy source.
+	SourceContentcrc64 []byte
+
+	// Bytes of source data in the specified range.
+	SourceRange *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// BlockBlobClientStageBlockOptions contains the optional parameters for the BlockBlobClient.StageBlock method.
+type BlockBlobClientStageBlockOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// Specify the transactional crc64 for the body, to be validated by the service.
+	TransactionalContentCRC64 []byte
+
+	// Specify the transactional md5 for the body, to be validated by the service.
+	TransactionalContentMD5 []byte
+}
+
+// BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method.
+type BlockBlobClientUploadOptions struct {
+	// Optional. Used to set blob tags in various blob operations.
+	BlobTagsString *string
+
+	// Specifies the date time when the blobs immutability policy is set to expire.
+	ImmutabilityPolicyExpiry *time.Time
+
+	// Specifies the immutability policy mode to set on the blob.
+	ImmutabilityPolicyMode *ImmutabilityPolicySetting
+
+	// Specified if a legal hold should be set on the blob.
+	LegalHold *bool
+
+	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+	// operation will copy the metadata from the source blob or file to the destination
+	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+	// is not copied from the source blob or file. Note that beginning with
+	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+	// Blobs, and Metadata for more information.
+	Metadata map[string]*string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// Optional. Indicates the tier to be set on the blob.
+	Tier *AccessTier
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// Specify the transactional crc64 for the body, to be validated by the service.
+	TransactionalContentCRC64 []byte
+
+	// Specify the transactional md5 for the body, to be validated by the service.
+	TransactionalContentMD5 []byte
+}
+
+// ContainerClientAcquireLeaseOptions contains the optional parameters for the ContainerClient.AcquireLease method.
+type ContainerClientAcquireLeaseOptions struct {
+	// Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed lease ID is
+	// not in the correct format. See Guid Constructor (String) for a list of valid GUID
+	// string formats.
+	ProposedLeaseID *string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method.
+type ContainerClientBreakLeaseOptions struct {
+	// For a break operation, proposed duration the lease should continue before it is broken, in seconds, between 0 and 60. This
+	// break period is only used if it is shorter than the time remaining on the
+	// lease. If longer, the time remaining on the lease is used. A new lease will not be available before the break period has
+	// expired, but the lease may be held for longer than the break period. If this
+	// header does not appear with a break operation, a fixed-duration lease breaks after the remaining lease period elapses,
+	// and an infinite lease breaks immediately.
+	BreakPeriod *int32
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// ContainerClientChangeLeaseOptions contains the optional parameters for the ContainerClient.ChangeLease method.
+type ContainerClientChangeLeaseOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method.
+type ContainerClientCreateOptions struct {
+	// Specifies whether data in the container may be accessed publicly and the level of access
+	Access *PublicAccessType
+
+	// Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the
+	// operation will copy the metadata from the source blob or file to the destination
+	// blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata
+	// is not copied from the source blob or file. Note that beginning with
+	// version 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+	// Blobs, and Metadata for more information.
+	Metadata map[string]*string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method.
+type ContainerClientDeleteOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method.
+type ContainerClientFilterBlobsOptions struct {
+	// Include this parameter to specify one or more datasets to include in the response.
+	Include []FilterBlobsIncludeItem
+
+	// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
+	// operation returns the NextMarker value within the response body if the listing
+	// operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
+	// as the value for the marker parameter in a subsequent call to request the next
+	// page of list items. The marker value is opaque to the client.
+	Marker *string
+
+	// Specifies the maximum number of blobs to return. If the request does not specify maxresults, or specifies a value
+	// greater than 5000, the server will return up to 5000 items. Note that if the
+	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+	// of the results. For this reason, it is possible that the service will
+	// return fewer results than specified by maxresults, or than the default of 5000.
+	Maxresults *int32
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy method.
+type ContainerClientGetAccessPolicyOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo method.
+type ContainerClientGetAccountInfoOptions struct {
+	// placeholder for future optional parameters
+}
+
+// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method.
+type ContainerClientGetPropertiesOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager
+// method.
+type ContainerClientListBlobFlatSegmentOptions struct {
+	// Include this parameter to specify one or more datasets to include in the response.
+	Include []ListBlobsIncludeItem
+
+	// A string value that identifies the portion of the list of blobs to be returned with the next listing operation. The
+	// operation returns the NextMarker value within the response body if the listing
+	// operation did not return all blobs remaining to be listed with the current page. The NextMarker value can be used
+	// as the value for the marker parameter in a subsequent call to request the next
+	// page of list items. The marker value is opaque to the client.
+	Marker *string
+
+	// Specifies the maximum number of blobs to return. If the request does not specify maxresults, or specifies a value
+	// greater than 5000, the server will return up to 5000 items. Note that if the
+	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+	// of the results. For this reason, it is possible that the service will
+	// return fewer results than specified by maxresults, or than the default of 5000.
+	Maxresults *int32
+
+	// Filters the results to return only blobs whose name begins with the specified prefix.
+	Prefix *string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// ContainerClientListBlobHierarchySegmentOptions contains the optional parameters for the ContainerClient.NewListBlobHierarchySegmentPager
+// method.
+type ContainerClientListBlobHierarchySegmentOptions struct {
+	// Include this parameter to specify one or more datasets to include in the response.
+	Include []ListBlobsIncludeItem
+
+	// A string value that identifies the portion of the list of blobs to be returned with the next listing operation. The
+	// operation returns the NextMarker value within the response body if the listing
+	// operation did not return all blobs remaining to be listed with the current page. The NextMarker value can be used
+	// as the value for the marker parameter in a subsequent call to request the next
+	// page of list items. The marker value is opaque to the client.
+	Marker *string
+
+	// Specifies the maximum number of blobs to return. If the request does not specify maxresults, or specifies a value
+	// greater than 5000, the server will return up to 5000 items. Note that if the
+	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+	// of the results. For this reason, it is possible that the service will
+	// return fewer results than specified by maxresults, or than the default of 5000.
+	Maxresults *int32
+
+	// Filters the results to return only blobs whose name begins with the specified prefix.
+	Prefix *string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
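Both listing option sets above follow the same marker-based pagination contract: the service returns NextMarker when a page is incomplete, and the pager feeds it back as the marker of the next request. A consumption sketch, assuming NewListBlobFlatSegmentPager takes only the options struct and that the client value is already constructed; listBlobsExample is a hypothetical helper.

package generated

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

// listBlobsExample walks every result page; the pager re-issues the request
// with the previous page's NextMarker until no further results remain.
func listBlobsExample(ctx context.Context, client *ContainerClient) error {
	pager := client.NewListBlobFlatSegmentPager(&ContainerClientListBlobFlatSegmentOptions{
		Prefix:     to.Ptr("logs/"),     // only blobs whose names start with "logs/"
		Maxresults: to.Ptr(int32(1000)), // page size; the service caps this at 5000
	})
	for pageNum := 0; pager.More(); pageNum++ {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return err
		}
		_ = page // inspect page contents / page.NextMarker as needed
		fmt.Println("fetched page", pageNum)
	}
	return nil
}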
+
+// ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method.
+type ContainerClientReleaseLeaseOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method.
+type ContainerClientRenameOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// A lease ID for the source container. If specified, the source container must have an active lease and the lease ID must match.
+	SourceLeaseID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method.
+type ContainerClientRenewLeaseOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method.
+type ContainerClientRestoreOptions struct {
+	// Optional. Version 2019-12-12 and later. Specifies the name of the deleted container to restore.
+	DeletedContainerName *string
+
+	// Optional. Version 2019-12-12 and later. Specifies the version of the deleted container to restore.
+	DeletedContainerVersion *string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy method.
+type ContainerClientSetAccessPolicyOptions struct {
+	// Specifies whether data in the container may be accessed publicly and the level of access.
+	Access *PublicAccessType
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method.
+type ContainerClientSetMetadataOptions struct {
+	// Optional. Specifies user-defined name-value pairs to associate with the container. Note that beginning with version
+	// 2009-09-19, metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers,
+	// Blobs, and Metadata for more information.
+	Metadata map[string]*string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// ContainerClientSubmitBatchOptions contains the optional parameters for the ContainerClient.SubmitBatch method.
+type ContainerClientSubmitBatchOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method.
+type ContainerCPKScopeInfo struct {
+	// Optional. Version 2019-07-07 and later. Specifies the default encryption scope to set on the container and use for all
+	// future writes.
+	DefaultEncryptionScope *string
+
+	// Optional. Version 2019-07-07 and newer. If true, prevents any request from specifying a different encryption scope than
+	// the scope set on the container.
+	PreventEncryptionScopeOverride *bool
+}
+
+// CPKInfo contains a group of parameters for the BlobClient.Download method.
+type CPKInfo struct {
+	// The algorithm used to produce the encryption key hash. Currently, the only accepted value is "AES256". Must be provided
+	// if the x-ms-encryption-key header is provided.
+	EncryptionAlgorithm *EncryptionAlgorithmType
+
+	// Optional. Specifies the encryption key to use to encrypt the data provided in the request. If not specified, encryption
+	// is performed with the root account encryption key. For more information, see
+	// Encryption at Rest for Azure Storage Services.
+	EncryptionKey *string
+
+	// The SHA-256 hash of the provided encryption key. Must be provided if the x-ms-encryption-key header is provided.
+	EncryptionKeySHA256 *string
+}
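A sketch of how CPKInfo is typically filled in: the customer-provided key and its SHA-256 hash are both sent base64-encoded, and the algorithm is pinned to AES256. EncryptionAlgorithmTypeAES256 is assumed to be a constant from this generated package, and newCPKInfo is a hypothetical helper.

package generated

import (
	"crypto/rand"
	"crypto/sha256"
	"encoding/base64"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

// newCPKInfo generates a random 256-bit key and fills in CPKInfo as the
// service expects it: key and hash base64-encoded, algorithm set to AES256.
func newCPKInfo() (*CPKInfo, error) {
	key := make([]byte, 32) // 256-bit customer-provided key
	if _, err := rand.Read(key); err != nil {
		return nil, err
	}
	hash := sha256.Sum256(key)
	return &CPKInfo{
		EncryptionKey:       to.Ptr(base64.StdEncoding.EncodeToString(key)),
		EncryptionKeySHA256: to.Ptr(base64.StdEncoding.EncodeToString(hash[:])),
		EncryptionAlgorithm: to.Ptr(EncryptionAlgorithmTypeAES256), // assumed constant from zz_constants.go
	}, nil
}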
+
+// CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
+type CPKScopeInfo struct {
+	// Optional. Version 2019-07-07 and later. Specifies the name of the encryption scope to use to encrypt the data provided
+	// in the request. If not specified, encryption is performed with the default
+	// account encryption scope. For more information, see Encryption at Rest for Azure Storage Services.
+	EncryptionScope *string
+}
+
+// LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
+type LeaseAccessConditions struct {
+	// If specified, the operation only succeeds if the resource's lease is active and matches this ID.
+	LeaseID *string
+}
+
+// ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
+type ModifiedAccessConditions struct {
+	// Specify an ETag value to operate only on blobs with a matching value.
+	IfMatch *azcore.ETag
+
+	// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
+	IfModifiedSince *time.Time
+
+	// Specify an ETag value to operate only on blobs without a matching value.
+	IfNoneMatch *azcore.ETag
+
+	// Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+	IfTags *string
+
+	// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+	IfUnmodifiedSince *time.Time
+}
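These conditions map to the If-Match / If-Modified-Since family of request headers and cause the service to fail the call with 412 (Precondition Failed) when they are not met. A sketch of the usual optimistic-concurrency pairing, using azcore.ETag and the to.Ptr helper; ifUnchangedSince is a hypothetical helper.

package generated

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

// ifUnchangedSince builds conditions that only let the operation proceed if
// the blob still has the ETag read earlier and has not been modified since
// the given point in time.
func ifUnchangedSince(etag azcore.ETag, since time.Time) *ModifiedAccessConditions {
	return &ModifiedAccessConditions{
		IfMatch:           to.Ptr(etag),  // 412 if the ETag has changed
		IfUnmodifiedSince: to.Ptr(since), // 412 if modified after `since`
	}
}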
+
+// PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method.
+type PageBlobClientClearPagesOptions struct {
+	// Return only the bytes of the blob in the specified range.
+	Range *string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// PageBlobClientCopyIncrementalOptions contains the optional parameters for the PageBlobClient.CopyIncremental method.
+type PageBlobClientCopyIncrementalOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// PageBlobClientCreateOptions contains the optional parameters for the PageBlobClient.Create method.
+type PageBlobClientCreateOptions struct {
+	// Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of
+	// the sequence number must be between 0 and 2^63 - 1.
+	BlobSequenceNumber *int64
+
+	// Optional. Used to set blob tags in various blob operations.
+	BlobTagsString *string
+
+	// Specifies the date time when the blob's immutability policy is set to expire.
+	ImmutabilityPolicyExpiry *time.Time
+
+	// Specifies the immutability policy mode to set on the blob.
+	ImmutabilityPolicyMode *ImmutabilityPolicySetting
+
+	// Specified if a legal hold should be set on the blob.
+	LegalHold *bool
+
+	// Optional. Specifies user-defined name-value pairs to associate with the blob. Note that beginning with version 2009-09-19,
+	// metadata names must adhere to the naming rules for C# identifiers. See Naming and Referencing Containers, Blobs, and
+	// Metadata for more information.
+	Metadata map[string]*string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// Optional. Indicates the tier to be set on the page blob.
+	Tier *PremiumPageBlobAccessTier
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager
+// method.
+type PageBlobClientGetPageRangesDiffOptions struct {
+	// A string value that identifies the portion of the list of page ranges to be returned with the next listing operation.
+	// The operation returns the NextMarker value within the response body if the listing
+	// operation did not return all page ranges remaining to be listed with the current page. The NextMarker value can be used
+	// as the value for the marker parameter in a subsequent call to request the next
+	// page of list items. The marker value is opaque to the client.
+	Marker *string
+
+	// Specifies the maximum number of page ranges to return. If the request does not specify maxresults, or specifies a value
+	// greater than 5000, the server will return up to 5000 items. Note that if the
+	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+	// of the results. For this reason, it is possible that the service will
+	// return fewer results than specified by maxresults, or than the default of 5000.
+	Maxresults *int32
+
+	// Optional. This header is only supported in service versions 2019-04-19 and after and specifies the URL of a previous snapshot
+	// of the target blob. The response will only contain pages that were changed
+	// between the target blob and its previous snapshot.
+	PrevSnapshotURL *string
+
+	// Optional in version 2015-07-08 and newer. The prevsnapshot parameter is a DateTime value that specifies that the response
+	// will contain only pages that were changed between the target blob and the previous
+	// snapshot. Changed pages include both updated and cleared pages. The target blob may be a snapshot, as long as the snapshot
+	// specified by prevsnapshot is the older of the two. Note that incremental
+	// snapshots are currently supported only for blobs created on or after January 1, 2016.
+	Prevsnapshot *string
+
+	// Return only the bytes of the blob in the specified range.
+	Range *string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+	Snapshot *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
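A sketch of requesting only the page ranges that changed after an earlier snapshot: set Prevsnapshot to that snapshot's opaque DateTime value (or PrevSnapshotURL in service versions 2019-04-19 and later) and optionally restrict the byte range. diffSinceSnapshot is a hypothetical helper.

package generated

import "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"

// diffSinceSnapshot builds options for a page-range diff against a baseline
// snapshot, so the response lists only ranges written or cleared since then.
func diffSinceSnapshot(snapshotTime string) *PageBlobClientGetPageRangesDiffOptions {
	return &PageBlobClientGetPageRangesDiffOptions{
		Prevsnapshot: to.Ptr(snapshotTime),        // opaque DateTime of the baseline snapshot
		Range:        to.Ptr("bytes=0-268435455"), // optionally restrict the diff to the first 256 MiB
	}
}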
+
+// PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager method.
+type PageBlobClientGetPageRangesOptions struct {
+	// A string value that identifies the portion of the list of page ranges to be returned with the next listing operation.
+	// The operation returns the NextMarker value within the response body if the listing
+	// operation did not return all page ranges remaining to be listed with the current page. The NextMarker value can be used
+	// as the value for the marker parameter in a subsequent call to request the next
+	// page of list items. The marker value is opaque to the client.
+	Marker *string
+
+	// Specifies the maximum number of page ranges to return. If the request does not specify maxresults, or specifies a value
+	// greater than 5000, the server will return up to 5000 items. Note that if the
+	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+	// of the results. For this reason, it is possible that the service will
+	// return fewer results than specified by maxresults, or than the default of 5000.
+	Maxresults *int32
+
+	// Return only the bytes of the blob in the specified range.
+	Range *string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+	// information on working with blob snapshots, see Creating a Snapshot of a Blob.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+	Snapshot *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method.
+type PageBlobClientResizeOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber
+// method.
+type PageBlobClientUpdateSequenceNumberOptions struct {
+	// Set for page blobs only. The sequence number is a user-controlled value that you can use to track requests. The value of
+	// the sequence number must be between 0 and 2^63 - 1.
+	BlobSequenceNumber *int64
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// PageBlobClientUploadPagesFromURLOptions contains the optional parameters for the PageBlobClient.UploadPagesFromURL method.
+type PageBlobClientUploadPagesFromURLOptions struct {
+	// Only Bearer type is supported. Credentials should be a valid OAuth access token for the copy source.
+	CopySourceAuthorization *string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// Specify the md5 calculated for the range of bytes that must be read from the copy source.
+	SourceContentMD5 []byte
+
+	// Specify the crc64 calculated for the range of bytes that must be read from the copy source.
+	SourceContentcrc64 []byte
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method.
+type PageBlobClientUploadPagesOptions struct {
+	// Return only the bytes of the blob in the specified range.
+	Range *string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+
+	// Specify the transactional crc64 for the body, to be validated by the service.
+	TransactionalContentCRC64 []byte
+
+	// Specify the transactional md5 for the body, to be validated by the service.
+	TransactionalContentMD5 []byte
+}
+
+// SequenceNumberAccessConditions contains a group of parameters for the PageBlobClient.UploadPages method.
+type SequenceNumberAccessConditions struct {
+	// Specify this header value to operate only on a blob if it has the specified sequence number.
+	IfSequenceNumberEqualTo *int64
+
+	// Specify this header value to operate only on a blob if it has a sequence number less than the specified value.
+	IfSequenceNumberLessThan *int64
+
+	// Specify this header value to operate only on a blob if it has a sequence number less than or equal to the specified value.
+	IfSequenceNumberLessThanOrEqualTo *int64
+}
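A sketch of using a sequence-number condition to fence page writes: the service rejects the request with 412 unless the blob's current sequence number satisfies the condition. onlyIfSequenceNumberIs is a hypothetical helper.

package generated

import "github.com/Azure/azure-sdk-for-go/sdk/azcore/to"

// onlyIfSequenceNumberIs guards a page write so it only succeeds while the
// blob still has the expected sequence number.
func onlyIfSequenceNumberIs(seq int64) *SequenceNumberAccessConditions {
	return &SequenceNumberAccessConditions{
		IfSequenceNumberEqualTo: to.Ptr(seq),
	}
}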
+
+// ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method.
+type ServiceClientFilterBlobsOptions struct {
+	// Include this parameter to specify one or more datasets to include in the response.
+	Include []FilterBlobsIncludeItem
+
+	// A string value that identifies the portion of the list of blobs to be returned with the next listing operation. The
+	// operation returns the NextMarker value within the response body if the listing
+	// operation did not return all blobs remaining to be listed with the current page. The NextMarker value can be used
+	// as the value for the marker parameter in a subsequent call to request the next
+	// page of list items. The marker value is opaque to the client.
+	Marker *string
+
+	// Specifies the maximum number of blobs to return. If the request does not specify maxresults, or specifies a value
+	// greater than 5000, the server will return up to 5000 items. Note that if the
+	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+	// of the results. For this reason, it is possible that the service will
+	// return fewer results than specified by maxresults, or than the default of 5000.
+	Maxresults *int32
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method.
+type ServiceClientGetAccountInfoOptions struct {
+	// placeholder for future optional parameters
+}
+
+// ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method.
+type ServiceClientGetPropertiesOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method.
+type ServiceClientGetStatisticsOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey method.
+type ServiceClientGetUserDelegationKeyOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager
+// method.
+type ServiceClientListContainersSegmentOptions struct {
+	// Include this parameter to specify that the container's metadata be returned as part of the response body.
+	Include []ListContainersIncludeType
+
+	// A string value that identifies the portion of the list of containers to be returned with the next listing operation. The
+	// operation returns the NextMarker value within the response body if the listing
+	// operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used
+	// as the value for the marker parameter in a subsequent call to request the next
+	// page of list items. The marker value is opaque to the client.
+	Marker *string
+
+	// Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value
+	// greater than 5000, the server will return up to 5000 items. Note that if the
+	// listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder
+	// of the results. For this reason, it is possible that the service will
+	// return fewer results than specified by maxresults, or than the default of 5000.
+	Maxresults *int32
+
+	// Filters the results to return only containers whose name begins with the specified prefix.
+	Prefix *string
+
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method.
+type ServiceClientSetPropertiesOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method.
+type ServiceClientSubmitBatchOptions struct {
+	// Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+	// analytics logging is enabled.
+	RequestID *string
+
+	// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+	// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+	Timeout *int32
+}
+
+// SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL method.
+type SourceModifiedAccessConditions struct {
+	// Specify an ETag value to operate only on blobs with a matching value.
+	SourceIfMatch *azcore.ETag
+
+	// Specify this header value to operate only on a blob if it has been modified since the specified date/time.
+	SourceIfModifiedSince *time.Time
+
+	// Specify an ETag value to operate only on blobs without a matching value.
+	SourceIfNoneMatch *azcore.ETag
+
+	// Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+	SourceIfTags *string
+
+	// Specify this header value to operate only on a blob if it has not been modified since the specified date/time.
+	SourceIfUnmodifiedSince *time.Time
+}
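The regenerated clients below (for example zz_pageblob_client.go) still surface failures as *azcore.ResponseError via runtime.NewResponseError; only the internal control flow changes. A sketch of how a caller typically inspects such an error, assuming an already-constructed PageBlobClient; classifyError is a hypothetical helper.

package generated

import (
	"context"
	"errors"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
)

// classifyError unpacks a failed service call: any non-success response is
// returned as *azcore.ResponseError carrying the HTTP status and the
// storage error code.
func classifyError(ctx context.Context, client *PageBlobClient) {
	_, err := client.Resize(ctx, 4*1024*1024, nil, nil, nil, nil, nil)
	var respErr *azcore.ResponseError
	if errors.As(err, &respErr) {
		fmt.Println("status:", respErr.StatusCode, "code:", respErr.ErrorCode)
	}
}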
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go
index b41644c99f..bfa9883f5c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go
@@ -3,9 +3,8 @@
 
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License. See License.txt in the project root for license information.
-// Code generated by Microsoft (R) AutoRest Code Generator.
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
 // Changes may cause incorrect behavior and will be lost if the code is regenerated.
-// DO NOT EDIT.
 
 package generated
 
@@ -41,18 +40,21 @@ type PageBlobClient struct {
 //     method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *PageBlobClient) ClearPages(ctx context.Context, contentLength int64, options *PageBlobClientClearPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientClearPagesResponse, error) {
+	var err error
 	req, err := client.clearPagesCreateRequest(ctx, contentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions)
 	if err != nil {
 		return PageBlobClientClearPagesResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return PageBlobClientClearPagesResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return PageBlobClientClearPagesResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
+		err = runtime.NewResponseError(httpResp)
+		return PageBlobClientClearPagesResponse{}, err
 	}
-	return client.clearPagesHandleResponse(resp)
+	resp, err := client.clearPagesHandleResponse(httpResp)
+	return resp, err
 }
 
 // clearPagesCreateRequest creates the ClearPages request.
@@ -122,30 +124,6 @@ func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, conte
 // clearPagesHandleResponse handles the ClearPages response.
 func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (PageBlobClientClearPagesResponse, error) {
 	result := PageBlobClientClearPagesResponse{}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
-	}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return PageBlobClientClearPagesResponse{}, err
-		}
-		result.LastModified = &lastModified
-	}
-	if val := resp.Header.Get("Content-MD5"); val != "" {
-		contentMD5, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return PageBlobClientClearPagesResponse{}, err
-		}
-		result.ContentMD5 = contentMD5
-	}
-	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
-		contentCRC64, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return PageBlobClientClearPagesResponse{}, err
-		}
-		result.ContentCRC64 = contentCRC64
-	}
 	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
 		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
@@ -156,11 +134,19 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
+	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
+		contentCRC64, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return PageBlobClientClearPagesResponse{}, err
+		}
+		result.ContentCRC64 = contentCRC64
 	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
+	if val := resp.Header.Get("Content-MD5"); val != "" {
+		contentMD5, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return PageBlobClientClearPagesResponse{}, err
+		}
+		result.ContentMD5 = contentMD5
 	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
@@ -169,6 +155,22 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PageBlobClientClearPagesResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	return result, nil
 }
 
@@ -187,18 +189,21 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag
 //     method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *PageBlobClient) CopyIncremental(ctx context.Context, copySource string, options *PageBlobClientCopyIncrementalOptions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCopyIncrementalResponse, error) {
+	var err error
 	req, err := client.copyIncrementalCreateRequest(ctx, copySource, options, modifiedAccessConditions)
 	if err != nil {
 		return PageBlobClientCopyIncrementalResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return PageBlobClientCopyIncrementalResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusAccepted) {
-		return PageBlobClientCopyIncrementalResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusAccepted) {
+		err = runtime.NewResponseError(httpResp)
+		return PageBlobClientCopyIncrementalResponse{}, err
 	}
-	return client.copyIncrementalHandleResponse(resp)
+	resp, err := client.copyIncrementalHandleResponse(httpResp)
+	return resp, err
 }
 
 // copyIncrementalCreateRequest creates the CopyIncremental request.
@@ -240,6 +245,22 @@ func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context,
 // copyIncrementalHandleResponse handles the CopyIncremental response.
 func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) (PageBlobClientCopyIncrementalResponse, error) {
 	result := PageBlobClientCopyIncrementalResponse{}
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-copy-id"); val != "" {
+		result.CopyID = &val
+	}
+	if val := resp.Header.Get("x-ms-copy-status"); val != "" {
+		result.CopyStatus = (*CopyStatusType)(&val)
+	}
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PageBlobClientCopyIncrementalResponse{}, err
+		}
+		result.Date = &date
+	}
 	if val := resp.Header.Get("ETag"); val != "" {
 		result.ETag = (*azcore.ETag)(&val)
 	}
@@ -250,28 +271,12 @@ func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response)
 		}
 		result.LastModified = &lastModified
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
-	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
 	}
 	if val := resp.Header.Get("x-ms-version"); val != "" {
 		result.Version = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return PageBlobClientCopyIncrementalResponse{}, err
-		}
-		result.Date = &date
-	}
-	if val := resp.Header.Get("x-ms-copy-id"); val != "" {
-		result.CopyID = &val
-	}
-	if val := resp.Header.Get("x-ms-copy-status"); val != "" {
-		result.CopyStatus = (*CopyStatusType)(&val)
-	}
 	return result, nil
 }
 
@@ -289,18 +294,21 @@ func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response)
 //   - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *PageBlobClient) Create(ctx context.Context, contentLength int64, blobContentLength int64, options *PageBlobClientCreateOptions, blobHTTPHeaders *BlobHTTPHeaders, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientCreateResponse, error) {
+	var err error
 	req, err := client.createCreateRequest(ctx, contentLength, blobContentLength, options, blobHTTPHeaders, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
 	if err != nil {
 		return PageBlobClientCreateResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return PageBlobClientCreateResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return PageBlobClientCreateResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
+		err = runtime.NewResponseError(httpResp)
+		return PageBlobClientCreateResponse{}, err
 	}
-	return client.createHandleResponse(resp)
+	resp, err := client.createHandleResponse(httpResp)
+	return resp, err
 }
 
 // createCreateRequest creates the Create request.
@@ -401,15 +409,8 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe
 // createHandleResponse handles the Create response.
 func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlobClientCreateResponse, error) {
 	result := PageBlobClientCreateResponse{}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
-	}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return PageBlobClientCreateResponse{}, err
-		}
-		result.LastModified = &lastModified
+	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
+		result.ClientRequestID = &val
 	}
 	if val := resp.Header.Get("Content-MD5"); val != "" {
 		contentMD5, err := base64.StdEncoding.DecodeString(val)
@@ -418,8 +419,35 @@ func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlo
 		}
 		result.ContentMD5 = contentMD5
 	}
-	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
-		result.ClientRequestID = &val
+	if val := resp.Header.Get("Date"); val != "" {
+		date, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PageBlobClientCreateResponse{}, err
+		}
+		result.Date = &date
+	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+		result.EncryptionKeySHA256 = &val
+	}
+	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+		result.EncryptionScope = &val
+	}
+	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
+		isServerEncrypted, err := strconv.ParseBool(val)
+		if err != nil {
+			return PageBlobClientCreateResponse{}, err
+		}
+		result.IsServerEncrypted = &isServerEncrypted
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PageBlobClientCreateResponse{}, err
+		}
+		result.LastModified = &lastModified
 	}
 	if val := resp.Header.Get("x-ms-request-id"); val != "" {
 		result.RequestID = &val
@@ -430,26 +458,6 @@ func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlo
 	if val := resp.Header.Get("x-ms-version-id"); val != "" {
 		result.VersionID = &val
 	}
-	if val := resp.Header.Get("Date"); val != "" {
-		date, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return PageBlobClientCreateResponse{}, err
-		}
-		result.Date = &date
-	}
-	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
-		isServerEncrypted, err := strconv.ParseBool(val)
-		if err != nil {
-			return PageBlobClientCreateResponse{}, err
-		}
-		result.IsServerEncrypted = &isServerEncrypted
-	}
-	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
-		result.EncryptionKeySHA256 = &val
-	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
-	}
 	return result, nil
 }
 
@@ -467,23 +475,16 @@ func (client *PageBlobClient) NewGetPageRangesPager(options *PageBlobClientGetPa
 			return page.NextMarker != nil && len(*page.NextMarker) > 0
 		},
 		Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesResponse) (PageBlobClientGetPageRangesResponse, error) {
-			var req *policy.Request
-			var err error
-			if page == nil {
-				req, err = client.GetPageRangesCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions)
-			} else {
-				req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker)
+			nextLink := ""
+			if page != nil {
+				nextLink = *page.NextMarker
 			}
+			resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) {
+				return client.GetPageRangesCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions)
+			}, nil)
 			if err != nil {
 				return PageBlobClientGetPageRangesResponse{}, err
 			}
-			resp, err := client.internal.Pipeline().Do(req)
-			if err != nil {
-				return PageBlobClientGetPageRangesResponse{}, err
-			}
-			if !runtime.HasStatusCode(resp, http.StatusOK) {
-				return PageBlobClientGetPageRangesResponse{}, runtime.NewResponseError(resp)
-			}
 			return client.GetPageRangesHandleResponse(resp)
 		},
 	})
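The rewritten Fetcher above delegates the first-request versus next-marker plumbing to runtime.FetcherForNextLink, so the pager behaves the same from the caller's side. A consumption sketch, assuming the NewGetPageRangesPager signature implied by this hunk; countPageRangePages is a hypothetical helper.

package generated

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
)

// countPageRangePages walks every page returned by the pager; marker handling
// happens inside runtime.FetcherForNextLink as shown above.
func countPageRangePages(ctx context.Context, client *PageBlobClient) (int, error) {
	pager := client.NewGetPageRangesPager(&PageBlobClientGetPageRangesOptions{
		Range: to.Ptr("bytes=0-1048575"), // only the first MiB of the blob
	}, nil, nil)
	pages := 0
	for pager.More() {
		if _, err := pager.NextPage(ctx); err != nil {
			return pages, err
		}
		pages++
	}
	return pages, nil
}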
@@ -542,16 +543,6 @@ func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, op
 // GetPageRangesHandleResponse handles the GetPageRanges response.
 func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesResponse, error) {
 	result := PageBlobClientGetPageRangesResponse{}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return PageBlobClientGetPageRangesResponse{}, err
-		}
-		result.LastModified = &lastModified
-	}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
-	}
 	if val := resp.Header.Get("x-ms-blob-content-length"); val != "" {
 		blobContentLength, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
@@ -562,12 +553,6 @@ func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) (
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -575,6 +560,22 @@ func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) (
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PageBlobClientGetPageRangesResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil {
 		return PageBlobClientGetPageRangesResponse{}, err
 	}
@@ -595,23 +596,16 @@ func (client *PageBlobClient) NewGetPageRangesDiffPager(options *PageBlobClientG
 			return page.NextMarker != nil && len(*page.NextMarker) > 0
 		},
 		Fetcher: func(ctx context.Context, page *PageBlobClientGetPageRangesDiffResponse) (PageBlobClientGetPageRangesDiffResponse, error) {
-			var req *policy.Request
-			var err error
-			if page == nil {
-				req, err = client.GetPageRangesDiffCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions)
-			} else {
-				req, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextMarker)
+			nextLink := ""
+			if page != nil {
+				nextLink = *page.NextMarker
 			}
+			resp, err := runtime.FetcherForNextLink(ctx, client.internal.Pipeline(), nextLink, func(ctx context.Context) (*policy.Request, error) {
+				return client.GetPageRangesDiffCreateRequest(ctx, options, leaseAccessConditions, modifiedAccessConditions)
+			}, nil)
 			if err != nil {
 				return PageBlobClientGetPageRangesDiffResponse{}, err
 			}
-			resp, err := client.internal.Pipeline().Do(req)
-			if err != nil {
-				return PageBlobClientGetPageRangesDiffResponse{}, err
-			}
-			if !runtime.HasStatusCode(resp, http.StatusOK) {
-				return PageBlobClientGetPageRangesDiffResponse{}, runtime.NewResponseError(resp)
-			}
 			return client.GetPageRangesDiffHandleResponse(resp)
 		},
 	})
@@ -676,16 +670,6 @@ func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context
 // GetPageRangesDiffHandleResponse handles the GetPageRangesDiff response.
 func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Response) (PageBlobClientGetPageRangesDiffResponse, error) {
 	result := PageBlobClientGetPageRangesDiffResponse{}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return PageBlobClientGetPageRangesDiffResponse{}, err
-		}
-		result.LastModified = &lastModified
-	}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
-	}
 	if val := resp.Header.Get("x-ms-blob-content-length"); val != "" {
 		blobContentLength, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
@@ -696,12 +680,6 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -709,6 +687,22 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PageBlobClientGetPageRangesDiffResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	if err := runtime.UnmarshalAsXML(resp, &result.PageList); err != nil {
 		return PageBlobClientGetPageRangesDiffResponse{}, err
 	}
@@ -727,18 +721,21 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons
 //   - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *PageBlobClient) Resize(ctx context.Context, blobContentLength int64, options *PageBlobClientResizeOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientResizeResponse, error) {
+	var err error
 	req, err := client.resizeCreateRequest(ctx, blobContentLength, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, modifiedAccessConditions)
 	if err != nil {
 		return PageBlobClientResizeResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return PageBlobClientResizeResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return PageBlobClientResizeResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return PageBlobClientResizeResponse{}, err
 	}
-	return client.resizeHandleResponse(resp)
+	resp, err := client.resizeHandleResponse(httpResp)
+	return resp, err
 }
 
 // resizeCreateRequest creates the Resize request.
@@ -795,16 +792,6 @@ func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobConte
 // resizeHandleResponse handles the Resize response.
 func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlobClientResizeResponse, error) {
 	result := PageBlobClientResizeResponse{}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
-	}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return PageBlobClientResizeResponse{}, err
-		}
-		result.LastModified = &lastModified
-	}
 	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
 		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
@@ -815,12 +802,6 @@ func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -828,6 +809,22 @@ func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PageBlobClientResizeResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	return result, nil
 }
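
The handler hunks in this file mostly reorder the optional-header parsing (now alphabetical by result field) without changing the idiom itself. A standalone sketch of that idiom, with illustrative names that are not from the SDK:

package headerparse // illustrative package, not part of the SDK or this patch

import (
	"net/http"
	"strconv"
)

// parseBlobSequenceNumber mirrors the optional-header idiom used by the
// *HandleResponse helpers above: an absent header leaves the field nil,
// a malformed value surfaces the parse error to the caller.
func parseBlobSequenceNumber(resp *http.Response) (*int64, error) {
	val := resp.Header.Get("x-ms-blob-sequence-number")
	if val == "" {
		return nil, nil
	}
	n, err := strconv.ParseInt(val, 10, 64)
	if err != nil {
		return nil, err
	}
	return &n, nil
}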
 
@@ -842,18 +839,21 @@ func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo
 //   - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *PageBlobClient) UpdateSequenceNumber(ctx context.Context, sequenceNumberAction SequenceNumberActionType, options *PageBlobClientUpdateSequenceNumberOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUpdateSequenceNumberResponse, error) {
+	var err error
 	req, err := client.updateSequenceNumberCreateRequest(ctx, sequenceNumberAction, options, leaseAccessConditions, modifiedAccessConditions)
 	if err != nil {
 		return PageBlobClientUpdateSequenceNumberResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return PageBlobClientUpdateSequenceNumberResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return PageBlobClientUpdateSequenceNumberResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return PageBlobClientUpdateSequenceNumberResponse{}, err
 	}
-	return client.updateSequenceNumberHandleResponse(resp)
+	resp, err := client.updateSequenceNumberHandleResponse(httpResp)
+	return resp, err
 }
 
 // updateSequenceNumberCreateRequest creates the UpdateSequenceNumber request.
@@ -901,16 +901,6 @@ func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Cont
 // updateSequenceNumberHandleResponse handles the UpdateSequenceNumber response.
 func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Response) (PageBlobClientUpdateSequenceNumberResponse, error) {
 	result := PageBlobClientUpdateSequenceNumberResponse{}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
-	}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return PageBlobClientUpdateSequenceNumberResponse{}, err
-		}
-		result.LastModified = &lastModified
-	}
 	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
 		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
@@ -921,12 +911,6 @@ func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -934,6 +918,22 @@ func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PageBlobClientUpdateSequenceNumberResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	return result, nil
 }
 
@@ -951,18 +951,21 @@ func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp
 //     method.
 //   - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
 func (client *PageBlobClient) UploadPages(ctx context.Context, contentLength int64, body io.ReadSeekCloser, options *PageBlobClientUploadPagesOptions, leaseAccessConditions *LeaseAccessConditions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (PageBlobClientUploadPagesResponse, error) {
+	var err error
 	req, err := client.uploadPagesCreateRequest(ctx, contentLength, body, options, leaseAccessConditions, cpkInfo, cpkScopeInfo, sequenceNumberAccessConditions, modifiedAccessConditions)
 	if err != nil {
 		return PageBlobClientUploadPagesResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return PageBlobClientUploadPagesResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return PageBlobClientUploadPagesResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
+		err = runtime.NewResponseError(httpResp)
+		return PageBlobClientUploadPagesResponse{}, err
 	}
-	return client.uploadPagesHandleResponse(resp)
+	resp, err := client.uploadPagesHandleResponse(httpResp)
+	return resp, err
 }
 
 // uploadPagesCreateRequest creates the UploadPages request.
@@ -1041,30 +1044,6 @@ func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, cont
 // uploadPagesHandleResponse handles the UploadPages response.
 func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (PageBlobClientUploadPagesResponse, error) {
 	result := PageBlobClientUploadPagesResponse{}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
-	}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
-		if err != nil {
-			return PageBlobClientUploadPagesResponse{}, err
-		}
-		result.LastModified = &lastModified
-	}
-	if val := resp.Header.Get("Content-MD5"); val != "" {
-		contentMD5, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return PageBlobClientUploadPagesResponse{}, err
-		}
-		result.ContentMD5 = contentMD5
-	}
-	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
-		contentCRC64, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return PageBlobClientUploadPagesResponse{}, err
-		}
-		result.ContentCRC64 = contentCRC64
-	}
 	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
 		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
@@ -1075,11 +1054,19 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
+	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
+		contentCRC64, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return PageBlobClientUploadPagesResponse{}, err
+		}
+		result.ContentCRC64 = contentCRC64
 	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
+	if val := resp.Header.Get("Content-MD5"); val != "" {
+		contentMD5, err := base64.StdEncoding.DecodeString(val)
+		if err != nil {
+			return PageBlobClientUploadPagesResponse{}, err
+		}
+		result.ContentMD5 = contentMD5
 	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
@@ -1088,6 +1075,15 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
+	}
+	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
+		result.EncryptionKeySHA256 = &val
+	}
+	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
+		result.EncryptionScope = &val
+	}
 	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
 		isServerEncrypted, err := strconv.ParseBool(val)
 		if err != nil {
@@ -1095,11 +1091,18 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa
 		}
 		result.IsServerEncrypted = &isServerEncrypted
 	}
-	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
-		result.EncryptionKeySHA256 = &val
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PageBlobClientUploadPagesResponse{}, err
+		}
+		result.LastModified = &lastModified
 	}
-	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
-		result.EncryptionScope = &val
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
 	}
 	return result, nil
 }
@@ -1126,18 +1129,21 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa
 //   - SourceModifiedAccessConditions - SourceModifiedAccessConditions contains a group of parameters for the BlobClient.StartCopyFromURL
 //     method.
 func (client *PageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL string, sourceRange string, contentLength int64, rangeParam string, options *PageBlobClientUploadPagesFromURLOptions, cpkInfo *CPKInfo, cpkScopeInfo *CPKScopeInfo, leaseAccessConditions *LeaseAccessConditions, sequenceNumberAccessConditions *SequenceNumberAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, sourceModifiedAccessConditions *SourceModifiedAccessConditions) (PageBlobClientUploadPagesFromURLResponse, error) {
+	var err error
 	req, err := client.uploadPagesFromURLCreateRequest(ctx, sourceURL, sourceRange, contentLength, rangeParam, options, cpkInfo, cpkScopeInfo, leaseAccessConditions, sequenceNumberAccessConditions, modifiedAccessConditions, sourceModifiedAccessConditions)
 	if err != nil {
 		return PageBlobClientUploadPagesFromURLResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return PageBlobClientUploadPagesFromURLResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusCreated) {
-		return PageBlobClientUploadPagesFromURLResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusCreated) {
+		err = runtime.NewResponseError(httpResp)
+		return PageBlobClientUploadPagesFromURLResponse{}, err
 	}
-	return client.uploadPagesFromURLHandleResponse(resp)
+	resp, err := client.uploadPagesFromURLHandleResponse(httpResp)
+	return resp, err
 }
 
 // uploadPagesFromURLCreateRequest creates the UploadPagesFromURL request.
@@ -1228,22 +1234,12 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex
 // uploadPagesFromURLHandleResponse handles the UploadPagesFromURL response.
 func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Response) (PageBlobClientUploadPagesFromURLResponse, error) {
 	result := PageBlobClientUploadPagesFromURLResponse{}
-	if val := resp.Header.Get("ETag"); val != "" {
-		result.ETag = (*azcore.ETag)(&val)
-	}
-	if val := resp.Header.Get("Last-Modified"); val != "" {
-		lastModified, err := time.Parse(time.RFC1123, val)
+	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
+		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
 			return PageBlobClientUploadPagesFromURLResponse{}, err
 		}
-		result.LastModified = &lastModified
-	}
-	if val := resp.Header.Get("Content-MD5"); val != "" {
-		contentMD5, err := base64.StdEncoding.DecodeString(val)
-		if err != nil {
-			return PageBlobClientUploadPagesFromURLResponse{}, err
-		}
-		result.ContentMD5 = contentMD5
+		result.BlobSequenceNumber = &blobSequenceNumber
 	}
 	if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
 		contentCRC64, err := base64.StdEncoding.DecodeString(val)
@@ -1252,18 +1248,12 @@ func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Respon
 		}
 		result.ContentCRC64 = contentCRC64
 	}
-	if val := resp.Header.Get("x-ms-blob-sequence-number"); val != "" {
-		blobSequenceNumber, err := strconv.ParseInt(val, 10, 64)
+	if val := resp.Header.Get("Content-MD5"); val != "" {
+		contentMD5, err := base64.StdEncoding.DecodeString(val)
 		if err != nil {
 			return PageBlobClientUploadPagesFromURLResponse{}, err
 		}
-		result.BlobSequenceNumber = &blobSequenceNumber
-	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
+		result.ContentMD5 = contentMD5
 	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
@@ -1272,12 +1262,8 @@ func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Respon
 		}
 		result.Date = &date
 	}
-	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
-		isServerEncrypted, err := strconv.ParseBool(val)
-		if err != nil {
-			return PageBlobClientUploadPagesFromURLResponse{}, err
-		}
-		result.IsServerEncrypted = &isServerEncrypted
+	if val := resp.Header.Get("ETag"); val != "" {
+		result.ETag = (*azcore.ETag)(&val)
 	}
 	if val := resp.Header.Get("x-ms-encryption-key-sha256"); val != "" {
 		result.EncryptionKeySHA256 = &val
@@ -1285,5 +1271,25 @@ func (client *PageBlobClient) uploadPagesFromURLHandleResponse(resp *http.Respon
 	if val := resp.Header.Get("x-ms-encryption-scope"); val != "" {
 		result.EncryptionScope = &val
 	}
+	if val := resp.Header.Get("x-ms-request-server-encrypted"); val != "" {
+		isServerEncrypted, err := strconv.ParseBool(val)
+		if err != nil {
+			return PageBlobClientUploadPagesFromURLResponse{}, err
+		}
+		result.IsServerEncrypted = &isServerEncrypted
+	}
+	if val := resp.Header.Get("Last-Modified"); val != "" {
+		lastModified, err := time.Parse(time.RFC1123, val)
+		if err != nil {
+			return PageBlobClientUploadPagesFromURLResponse{}, err
+		}
+		result.LastModified = &lastModified
+	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	return result, nil
 }
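
The rewritten methods still surface non-2xx responses through runtime.NewResponseError, so callers can keep unwrapping the returned error into *azcore.ResponseError. A hedged caller-side sketch (the function, its arguments, and the assumption that the exported pageblob.Client forwards to the generated client shown here are illustrative):

package main // illustrative caller, not part of the patch

import (
	"context"
	"errors"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob"
)

func resizePageBlob(ctx context.Context, pb *pageblob.Client, size int64) {
	if _, err := pb.Resize(ctx, size, nil); err != nil {
		var respErr *azcore.ResponseError
		if errors.As(err, &respErr) {
			// Produced by runtime.NewResponseError for non-200 responses.
			log.Printf("resize rejected: HTTP %d, error code %q", respErr.StatusCode, respErr.ErrorCode)
			return
		}
		log.Printf("transport error: %v", err)
	}
}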
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go
index b52664c938..738d23c8f1 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go
@@ -3,9 +3,8 @@
 
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License. See License.txt in the project root for license information.
-// Code generated by Microsoft (R) AutoRest Code Generator.
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
 // Changes may cause incorrect behavior and will be lost if the code is regenerated.
-// DO NOT EDIT.
 
 package generated
 
@@ -656,18 +655,20 @@ type BlobClientGetPropertiesResponse struct {
 
 // BlobClientGetTagsResponse contains the response from method BlobClient.GetTags.
 type BlobClientGetTagsResponse struct {
+	// Blob tags
 	BlobTags
+
 	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
-	ClientRequestID *string `xml:"ClientRequestID"`
+	ClientRequestID *string
 
 	// Date contains the information returned from the Date header response.
-	Date *time.Time `xml:"Date"`
+	Date *time.Time
 
 	// RequestID contains the information returned from the x-ms-request-id header response.
-	RequestID *string `xml:"RequestID"`
+	RequestID *string
 
 	// Version contains the information returned from the x-ms-version header response.
-	Version *string `xml:"Version"`
+	Version *string
 }
 
 // BlobClientQueryResponse contains the response from method BlobClient.Query.
@@ -1051,29 +1052,30 @@ type BlockBlobClientCommitBlockListResponse struct {
 // BlockBlobClientGetBlockListResponse contains the response from method BlockBlobClient.GetBlockList.
 type BlockBlobClientGetBlockListResponse struct {
 	BlockList
+
 	// BlobContentLength contains the information returned from the x-ms-blob-content-length header response.
-	BlobContentLength *int64 `xml:"BlobContentLength"`
+	BlobContentLength *int64
 
 	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
-	ClientRequestID *string `xml:"ClientRequestID"`
+	ClientRequestID *string
 
 	// ContentType contains the information returned from the Content-Type header response.
-	ContentType *string `xml:"ContentType"`
+	ContentType *string
 
 	// Date contains the information returned from the Date header response.
-	Date *time.Time `xml:"Date"`
+	Date *time.Time
 
 	// ETag contains the information returned from the ETag header response.
-	ETag *azcore.ETag `xml:"ETag"`
+	ETag *azcore.ETag
 
 	// LastModified contains the information returned from the Last-Modified header response.
-	LastModified *time.Time `xml:"LastModified"`
+	LastModified *time.Time
 
 	// RequestID contains the information returned from the x-ms-request-id header response.
-	RequestID *string `xml:"RequestID"`
+	RequestID *string
 
 	// Version contains the information returned from the x-ms-version header response.
-	Version *string `xml:"Version"`
+	Version *string
 }
 
 // BlockBlobClientPutBlobFromURLResponse contains the response from method BlockBlobClient.PutBlobFromURL.
@@ -1318,45 +1320,47 @@ type ContainerClientDeleteResponse struct {
 
 // ContainerClientFilterBlobsResponse contains the response from method ContainerClient.FilterBlobs.
 type ContainerClientFilterBlobsResponse struct {
+	// The result of a Filter Blobs API call
 	FilterBlobSegment
+
 	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
-	ClientRequestID *string `xml:"ClientRequestID"`
+	ClientRequestID *string
 
 	// Date contains the information returned from the Date header response.
-	Date *time.Time `xml:"Date"`
+	Date *time.Time
 
 	// RequestID contains the information returned from the x-ms-request-id header response.
-	RequestID *string `xml:"RequestID"`
+	RequestID *string
 
 	// Version contains the information returned from the x-ms-version header response.
-	Version *string `xml:"Version"`
+	Version *string
 }
 
 // ContainerClientGetAccessPolicyResponse contains the response from method ContainerClient.GetAccessPolicy.
 type ContainerClientGetAccessPolicyResponse struct {
 	// BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response.
-	BlobPublicAccess *PublicAccessType `xml:"BlobPublicAccess"`
+	BlobPublicAccess *PublicAccessType
 
 	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
-	ClientRequestID *string `xml:"ClientRequestID"`
+	ClientRequestID *string
 
 	// Date contains the information returned from the Date header response.
-	Date *time.Time `xml:"Date"`
+	Date *time.Time
 
 	// ETag contains the information returned from the ETag header response.
-	ETag *azcore.ETag `xml:"ETag"`
+	ETag *azcore.ETag
 
 	// LastModified contains the information returned from the Last-Modified header response.
-	LastModified *time.Time `xml:"LastModified"`
+	LastModified *time.Time
 
 	// RequestID contains the information returned from the x-ms-request-id header response.
-	RequestID *string `xml:"RequestID"`
+	RequestID *string
 
 	// a collection of signed identifiers
 	SignedIdentifiers []*SignedIdentifier `xml:"SignedIdentifier"`
 
 	// Version contains the information returned from the x-ms-version header response.
-	Version *string `xml:"Version"`
+	Version *string
 }
 
 // ContainerClientGetAccountInfoResponse contains the response from method ContainerClient.GetAccountInfo.
@@ -1434,40 +1438,44 @@ type ContainerClientGetPropertiesResponse struct {
 
 // ContainerClientListBlobFlatSegmentResponse contains the response from method ContainerClient.NewListBlobFlatSegmentPager.
 type ContainerClientListBlobFlatSegmentResponse struct {
+	// An enumeration of blobs
 	ListBlobsFlatSegmentResponse
+
 	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
-	ClientRequestID *string `xml:"ClientRequestID"`
+	ClientRequestID *string
 
 	// ContentType contains the information returned from the Content-Type header response.
-	ContentType *string `xml:"ContentType"`
+	ContentType *string
 
 	// Date contains the information returned from the Date header response.
-	Date *time.Time `xml:"Date"`
+	Date *time.Time
 
 	// RequestID contains the information returned from the x-ms-request-id header response.
-	RequestID *string `xml:"RequestID"`
+	RequestID *string
 
 	// Version contains the information returned from the x-ms-version header response.
-	Version *string `xml:"Version"`
+	Version *string
 }
 
 // ContainerClientListBlobHierarchySegmentResponse contains the response from method ContainerClient.NewListBlobHierarchySegmentPager.
 type ContainerClientListBlobHierarchySegmentResponse struct {
+	// An enumeration of blobs
 	ListBlobsHierarchySegmentResponse
+
 	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
-	ClientRequestID *string `xml:"ClientRequestID"`
+	ClientRequestID *string
 
 	// ContentType contains the information returned from the Content-Type header response.
-	ContentType *string `xml:"ContentType"`
+	ContentType *string
 
 	// Date contains the information returned from the Date header response.
-	Date *time.Time `xml:"Date"`
+	Date *time.Time
 
 	// RequestID contains the information returned from the x-ms-request-id header response.
-	RequestID *string `xml:"RequestID"`
+	RequestID *string
 
 	// Version contains the information returned from the x-ms-version header response.
-	Version *string `xml:"Version"`
+	Version *string
 }
 
 // ContainerClientReleaseLeaseResponse contains the response from method ContainerClient.ReleaseLease.
@@ -1697,52 +1705,56 @@ type PageBlobClientCreateResponse struct {
 
 // PageBlobClientGetPageRangesDiffResponse contains the response from method PageBlobClient.NewGetPageRangesDiffPager.
 type PageBlobClientGetPageRangesDiffResponse struct {
+	// the list of pages
 	PageList
+
 	// BlobContentLength contains the information returned from the x-ms-blob-content-length header response.
-	BlobContentLength *int64 `xml:"BlobContentLength"`
+	BlobContentLength *int64
 
 	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
-	ClientRequestID *string `xml:"ClientRequestID"`
+	ClientRequestID *string
 
 	// Date contains the information returned from the Date header response.
-	Date *time.Time `xml:"Date"`
+	Date *time.Time
 
 	// ETag contains the information returned from the ETag header response.
-	ETag *azcore.ETag `xml:"ETag"`
+	ETag *azcore.ETag
 
 	// LastModified contains the information returned from the Last-Modified header response.
-	LastModified *time.Time `xml:"LastModified"`
+	LastModified *time.Time
 
 	// RequestID contains the information returned from the x-ms-request-id header response.
-	RequestID *string `xml:"RequestID"`
+	RequestID *string
 
 	// Version contains the information returned from the x-ms-version header response.
-	Version *string `xml:"Version"`
+	Version *string
 }
 
 // PageBlobClientGetPageRangesResponse contains the response from method PageBlobClient.NewGetPageRangesPager.
 type PageBlobClientGetPageRangesResponse struct {
+	// the list of pages
 	PageList
+
 	// BlobContentLength contains the information returned from the x-ms-blob-content-length header response.
-	BlobContentLength *int64 `xml:"BlobContentLength"`
+	BlobContentLength *int64
 
 	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
-	ClientRequestID *string `xml:"ClientRequestID"`
+	ClientRequestID *string
 
 	// Date contains the information returned from the Date header response.
-	Date *time.Time `xml:"Date"`
+	Date *time.Time
 
 	// ETag contains the information returned from the ETag header response.
-	ETag *azcore.ETag `xml:"ETag"`
+	ETag *azcore.ETag
 
 	// LastModified contains the information returned from the Last-Modified header response.
-	LastModified *time.Time `xml:"LastModified"`
+	LastModified *time.Time
 
 	// RequestID contains the information returned from the x-ms-request-id header response.
-	RequestID *string `xml:"RequestID"`
+	RequestID *string
 
 	// Version contains the information returned from the x-ms-version header response.
-	Version *string `xml:"Version"`
+	Version *string
 }
 
 // PageBlobClientResizeResponse contains the response from method PageBlobClient.Resize.
@@ -1870,18 +1882,20 @@ type PageBlobClientUploadPagesResponse struct {
 
 // ServiceClientFilterBlobsResponse contains the response from method ServiceClient.FilterBlobs.
 type ServiceClientFilterBlobsResponse struct {
+	// The result of a Filter Blobs API call
 	FilterBlobSegment
+
 	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
-	ClientRequestID *string `xml:"ClientRequestID"`
+	ClientRequestID *string
 
 	// Date contains the information returned from the Date header response.
-	Date *time.Time `xml:"Date"`
+	Date *time.Time
 
 	// RequestID contains the information returned from the x-ms-request-id header response.
-	RequestID *string `xml:"RequestID"`
+	RequestID *string
 
 	// Version contains the information returned from the x-ms-version header response.
-	Version *string `xml:"Version"`
+	Version *string
 }
 
 // ServiceClientGetAccountInfoResponse contains the response from method ServiceClient.GetAccountInfo.
@@ -1910,60 +1924,68 @@ type ServiceClientGetAccountInfoResponse struct {
 
 // ServiceClientGetPropertiesResponse contains the response from method ServiceClient.GetProperties.
 type ServiceClientGetPropertiesResponse struct {
+	// Storage Service Properties.
 	StorageServiceProperties
+
 	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
-	ClientRequestID *string `xml:"ClientRequestID"`
+	ClientRequestID *string
 
 	// RequestID contains the information returned from the x-ms-request-id header response.
-	RequestID *string `xml:"RequestID"`
+	RequestID *string
 
 	// Version contains the information returned from the x-ms-version header response.
-	Version *string `xml:"Version"`
+	Version *string
 }
 
 // ServiceClientGetStatisticsResponse contains the response from method ServiceClient.GetStatistics.
 type ServiceClientGetStatisticsResponse struct {
+	// Stats for the storage service.
 	StorageServiceStats
+
 	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
-	ClientRequestID *string `xml:"ClientRequestID"`
+	ClientRequestID *string
 
 	// Date contains the information returned from the Date header response.
-	Date *time.Time `xml:"Date"`
+	Date *time.Time
 
 	// RequestID contains the information returned from the x-ms-request-id header response.
-	RequestID *string `xml:"RequestID"`
+	RequestID *string
 
 	// Version contains the information returned from the x-ms-version header response.
-	Version *string `xml:"Version"`
+	Version *string
 }
 
 // ServiceClientGetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey.
 type ServiceClientGetUserDelegationKeyResponse struct {
+	// A user delegation key
 	UserDelegationKey
+
 	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
-	ClientRequestID *string `xml:"ClientRequestID"`
+	ClientRequestID *string
 
 	// Date contains the information returned from the Date header response.
-	Date *time.Time `xml:"Date"`
+	Date *time.Time
 
 	// RequestID contains the information returned from the x-ms-request-id header response.
-	RequestID *string `xml:"RequestID"`
+	RequestID *string
 
 	// Version contains the information returned from the x-ms-version header response.
-	Version *string `xml:"Version"`
+	Version *string
 }
 
 // ServiceClientListContainersSegmentResponse contains the response from method ServiceClient.NewListContainersSegmentPager.
 type ServiceClientListContainersSegmentResponse struct {
+	// An enumeration of containers
 	ListContainersSegmentResponse
+
 	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
-	ClientRequestID *string `xml:"ClientRequestID"`
+	ClientRequestID *string
 
 	// RequestID contains the information returned from the x-ms-request-id header response.
-	RequestID *string `xml:"RequestID"`
+	RequestID *string
 
 	// Version contains the information returned from the x-ms-version header response.
-	Version *string `xml:"Version"`
+	Version *string
 }
 
 // ServiceClientSetPropertiesResponse contains the response from method ServiceClient.SetProperties.
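
With the xml struct tags gone, these header-derived fields are plain struct members filled in by the handle-response helpers, while the embedded model still carries the XML payload. A hedged sketch using the exported blob package (blobClient construction is omitted, and the BlobTagSet field is assumed to be promoted from the embedded BlobTags model as in the generated models file):

package main // illustrative caller, not part of the patch

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func printTags(ctx context.Context, blobClient *blob.Client) error {
	resp, err := blobClient.GetTags(ctx, nil)
	if err != nil {
		return err
	}
	if resp.Version != nil {
		fmt.Println("x-ms-version:", *resp.Version) // header-derived field, set by the response handler
	}
	for _, tag := range resp.BlobTagSet { // promoted from the embedded BlobTags model
		fmt.Printf("%s=%s\n", *tag.Key, *tag.Value)
	}
	return nil
}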
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go
index faeefdc532..9a73b7301b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go
@@ -3,9 +3,8 @@
 
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License. See License.txt in the project root for license information.
-// Code generated by Microsoft (R) AutoRest Code Generator.
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
 // Changes may cause incorrect behavior and will be lost if the code is regenerated.
-// DO NOT EDIT.
 
 package generated
 
@@ -38,18 +37,21 @@ type ServiceClient struct {
 //   - where - Filters the results to return only blobs whose tags match the specified expression.
 //   - options - ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method.
 func (client *ServiceClient) FilterBlobs(ctx context.Context, where string, options *ServiceClientFilterBlobsOptions) (ServiceClientFilterBlobsResponse, error) {
+	var err error
 	req, err := client.filterBlobsCreateRequest(ctx, where, options)
 	if err != nil {
 		return ServiceClientFilterBlobsResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ServiceClientFilterBlobsResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return ServiceClientFilterBlobsResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return ServiceClientFilterBlobsResponse{}, err
 	}
-	return client.filterBlobsHandleResponse(resp)
+	resp, err := client.filterBlobsHandleResponse(httpResp)
+	return resp, err
 }
 
 // filterBlobsCreateRequest creates the FilterBlobs request.
@@ -88,12 +90,6 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (Ser
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -101,6 +97,12 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (Ser
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil {
 		return ServiceClientFilterBlobsResponse{}, err
 	}
@@ -113,18 +115,21 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (Ser
 // Generated from API version 2023-08-03
 //   - options - ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method.
 func (client *ServiceClient) GetAccountInfo(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (ServiceClientGetAccountInfoResponse, error) {
+	var err error
 	req, err := client.getAccountInfoCreateRequest(ctx, options)
 	if err != nil {
 		return ServiceClientGetAccountInfoResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ServiceClientGetAccountInfoResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return ServiceClientGetAccountInfoResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return ServiceClientGetAccountInfoResponse{}, err
 	}
-	return client.getAccountInfoHandleResponse(resp)
+	resp, err := client.getAccountInfoHandleResponse(httpResp)
+	return resp, err
 }
 
 // getAccountInfoCreateRequest creates the GetAccountInfo request.
@@ -145,15 +150,12 @@ func (client *ServiceClient) getAccountInfoCreateRequest(ctx context.Context, op
 // getAccountInfoHandleResponse handles the GetAccountInfo response.
 func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) (ServiceClientGetAccountInfoResponse, error) {
 	result := ServiceClientGetAccountInfoResponse{}
+	if val := resp.Header.Get("x-ms-account-kind"); val != "" {
+		result.AccountKind = (*AccountKind)(&val)
+	}
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -161,12 +163,6 @@ func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) (
 		}
 		result.Date = &date
 	}
-	if val := resp.Header.Get("x-ms-sku-name"); val != "" {
-		result.SKUName = (*SKUName)(&val)
-	}
-	if val := resp.Header.Get("x-ms-account-kind"); val != "" {
-		result.AccountKind = (*AccountKind)(&val)
-	}
 	if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" {
 		isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val)
 		if err != nil {
@@ -174,6 +170,15 @@ func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) (
 		}
 		result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled
 	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-sku-name"); val != "" {
+		result.SKUName = (*SKUName)(&val)
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	return result, nil
 }
 
@@ -184,18 +189,21 @@ func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) (
 // Generated from API version 2023-08-03
 //   - options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method.
 func (client *ServiceClient) GetProperties(ctx context.Context, options *ServiceClientGetPropertiesOptions) (ServiceClientGetPropertiesResponse, error) {
+	var err error
 	req, err := client.getPropertiesCreateRequest(ctx, options)
 	if err != nil {
 		return ServiceClientGetPropertiesResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ServiceClientGetPropertiesResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return ServiceClientGetPropertiesResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return ServiceClientGetPropertiesResponse{}, err
 	}
-	return client.getPropertiesHandleResponse(resp)
+	resp, err := client.getPropertiesHandleResponse(httpResp)
+	return resp, err
 }
 
 // getPropertiesCreateRequest creates the GetProperties request.
@@ -244,18 +252,21 @@ func (client *ServiceClient) getPropertiesHandleResponse(resp *http.Response) (S
 // Generated from API version 2023-08-03
 //   - options - ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method.
 func (client *ServiceClient) GetStatistics(ctx context.Context, options *ServiceClientGetStatisticsOptions) (ServiceClientGetStatisticsResponse, error) {
+	var err error
 	req, err := client.getStatisticsCreateRequest(ctx, options)
 	if err != nil {
 		return ServiceClientGetStatisticsResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ServiceClientGetStatisticsResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return ServiceClientGetStatisticsResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return ServiceClientGetStatisticsResponse{}, err
 	}
-	return client.getStatisticsHandleResponse(resp)
+	resp, err := client.getStatisticsHandleResponse(httpResp)
+	return resp, err
 }
 
 // getStatisticsCreateRequest creates the GetStatistics request.
@@ -285,12 +296,6 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -298,6 +303,12 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	if err := runtime.UnmarshalAsXML(resp, &result.StorageServiceStats); err != nil {
 		return ServiceClientGetStatisticsResponse{}, err
 	}
@@ -313,18 +324,21 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S
 //   - options - ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey
 //     method.
 func (client *ServiceClient) GetUserDelegationKey(ctx context.Context, keyInfo KeyInfo, options *ServiceClientGetUserDelegationKeyOptions) (ServiceClientGetUserDelegationKeyResponse, error) {
+	var err error
 	req, err := client.getUserDelegationKeyCreateRequest(ctx, keyInfo, options)
 	if err != nil {
 		return ServiceClientGetUserDelegationKeyResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ServiceClientGetUserDelegationKeyResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusOK) {
-		return ServiceClientGetUserDelegationKeyResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusOK) {
+		err = runtime.NewResponseError(httpResp)
+		return ServiceClientGetUserDelegationKeyResponse{}, err
 	}
-	return client.getUserDelegationKeyHandleResponse(resp)
+	resp, err := client.getUserDelegationKeyHandleResponse(httpResp)
+	return resp, err
 }
 
 // getUserDelegationKeyCreateRequest creates the GetUserDelegationKey request.
@@ -357,12 +371,6 @@ func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Respo
 	if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
 		result.ClientRequestID = &val
 	}
-	if val := resp.Header.Get("x-ms-request-id"); val != "" {
-		result.RequestID = &val
-	}
-	if val := resp.Header.Get("x-ms-version"); val != "" {
-		result.Version = &val
-	}
 	if val := resp.Header.Get("Date"); val != "" {
 		date, err := time.Parse(time.RFC1123, val)
 		if err != nil {
@@ -370,6 +378,12 @@ func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Respo
 		}
 		result.Date = &date
 	}
+	if val := resp.Header.Get("x-ms-request-id"); val != "" {
+		result.RequestID = &val
+	}
+	if val := resp.Header.Get("x-ms-version"); val != "" {
+		result.Version = &val
+	}
 	if err := runtime.UnmarshalAsXML(resp, &result.UserDelegationKey); err != nil {
 		return ServiceClientGetUserDelegationKeyResponse{}, err
 	}
@@ -441,18 +455,21 @@ func (client *ServiceClient) ListContainersSegmentHandleResponse(resp *http.Resp
 //   - storageServiceProperties - The StorageService properties.
 //   - options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method.
 func (client *ServiceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (ServiceClientSetPropertiesResponse, error) {
+	var err error
 	req, err := client.setPropertiesCreateRequest(ctx, storageServiceProperties, options)
 	if err != nil {
 		return ServiceClientSetPropertiesResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ServiceClientSetPropertiesResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusAccepted) {
-		return ServiceClientSetPropertiesResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusAccepted) {
+		err = runtime.NewResponseError(httpResp)
+		return ServiceClientSetPropertiesResponse{}, err
 	}
-	return client.setPropertiesHandleResponse(resp)
+	resp, err := client.setPropertiesHandleResponse(httpResp)
+	return resp, err
 }
 
 // setPropertiesCreateRequest creates the SetProperties request.
@@ -504,18 +521,21 @@ func (client *ServiceClient) setPropertiesHandleResponse(resp *http.Response) (S
 //   - body - Initial data
 //   - options - ServiceClientSubmitBatchOptions contains the optional parameters for the ServiceClient.SubmitBatch method.
 func (client *ServiceClient) SubmitBatch(ctx context.Context, contentLength int64, multipartContentType string, body io.ReadSeekCloser, options *ServiceClientSubmitBatchOptions) (ServiceClientSubmitBatchResponse, error) {
+	var err error
 	req, err := client.submitBatchCreateRequest(ctx, contentLength, multipartContentType, body, options)
 	if err != nil {
 		return ServiceClientSubmitBatchResponse{}, err
 	}
-	resp, err := client.internal.Pipeline().Do(req)
+	httpResp, err := client.internal.Pipeline().Do(req)
 	if err != nil {
 		return ServiceClientSubmitBatchResponse{}, err
 	}
-	if !runtime.HasStatusCode(resp, http.StatusAccepted) {
-		return ServiceClientSubmitBatchResponse{}, runtime.NewResponseError(resp)
+	if !runtime.HasStatusCode(httpResp, http.StatusAccepted) {
+		err = runtime.NewResponseError(httpResp)
+		return ServiceClientSubmitBatchResponse{}, err
 	}
-	return client.submitBatchHandleResponse(resp)
+	resp, err := client.submitBatchHandleResponse(httpResp)
+	return resp, err
 }
 
 // submitBatchCreateRequest creates the SubmitBatch request.
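
A hedged in-module sketch of the FilterBlobs flow above (the generated package is internal, so this only compiles inside the azblob module; the svc argument and the tag expression are illustrative):

// Assumes an in-module file importing "context", "fmt" and the internal generated package.
func filterByTag(ctx context.Context, svc *generated.ServiceClient) error {
	resp, err := svc.FilterBlobs(ctx, `"project"='demo'`, nil)
	if err != nil {
		return err // the error from runtime.NewResponseError for non-200 status codes
	}
	if resp.RequestID != nil && resp.Version != nil {
		// Header-derived fields populated by filterBlobsHandleResponse.
		fmt.Println("request", *resp.RequestID, "served by API version", *resp.Version)
	}
	return nil
}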
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go
index 4b4d51aa39..5866503297 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go
@@ -3,9 +3,8 @@
 
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License. See License.txt in the project root for license information.
-// Code generated by Microsoft (R) AutoRest Code Generator.
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
 // Changes may cause incorrect behavior and will be lost if the code is regenerated.
-// DO NOT EDIT.
 
 package generated
 
@@ -15,29 +14,29 @@ import (
 )
 
 const (
-	rfc1123JSON = `"` + time.RFC1123 + `"`
+	dateTimeRFC1123JSON = `"` + time.RFC1123 + `"`
 )
 
-type timeRFC1123 time.Time
+type dateTimeRFC1123 time.Time
 
-func (t timeRFC1123) MarshalJSON() ([]byte, error) {
-	b := []byte(time.Time(t).Format(rfc1123JSON))
+func (t dateTimeRFC1123) MarshalJSON() ([]byte, error) {
+	b := []byte(time.Time(t).Format(dateTimeRFC1123JSON))
 	return b, nil
 }
 
-func (t timeRFC1123) MarshalText() ([]byte, error) {
+func (t dateTimeRFC1123) MarshalText() ([]byte, error) {
 	b := []byte(time.Time(t).Format(time.RFC1123))
 	return b, nil
 }
 
-func (t *timeRFC1123) UnmarshalJSON(data []byte) error {
-	p, err := time.Parse(rfc1123JSON, strings.ToUpper(string(data)))
-	*t = timeRFC1123(p)
+func (t *dateTimeRFC1123) UnmarshalJSON(data []byte) error {
+	p, err := time.Parse(dateTimeRFC1123JSON, strings.ToUpper(string(data)))
+	*t = dateTimeRFC1123(p)
 	return err
 }
 
-func (t *timeRFC1123) UnmarshalText(data []byte) error {
+func (t *dateTimeRFC1123) UnmarshalText(data []byte) error {
 	p, err := time.Parse(time.RFC1123, string(data))
-	*t = timeRFC1123(p)
+	*t = dateTimeRFC1123(p)
 	return err
 }
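
The rename from timeRFC1123 to dateTimeRFC1123 keeps the behavior. A minimal in-package sketch of the round trip (the type is unexported, so this only compiles inside package generated):

// Assumes "fmt" and "time" are imported alongside the existing imports.
func exampleDateTimeRFC1123() {
	t := dateTimeRFC1123(time.Date(2024, time.January, 16, 22, 48, 46, 0, time.UTC))

	b, _ := t.MarshalText() // "Tue, 16 Jan 2024 22:48:46 UTC"

	var parsed dateTimeRFC1123
	_ = parsed.UnmarshalText(b) // parsed back with time.RFC1123

	fmt.Println(time.Time(parsed).Equal(time.Time(t))) // true
}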
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go
index 1ce9d62116..82b370133f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go
@@ -3,9 +3,8 @@
 
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License. See License.txt in the project root for license information.
-// Code generated by Microsoft (R) AutoRest Code Generator.
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
 // Changes may cause incorrect behavior and will be lost if the code is regenerated.
-// DO NOT EDIT.
 
 package generated
 
@@ -15,45 +14,45 @@ import (
 	"time"
 )
 
-const (
-	utcLayoutJSON = `"2006-01-02T15:04:05.999999999"`
-	utcLayout     = "2006-01-02T15:04:05.999999999"
-	rfc3339JSON   = `"` + time.RFC3339Nano + `"`
-)
-
 // Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases.
 var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`)
 
-type timeRFC3339 time.Time
+const (
+	utcDateTimeJSON = `"2006-01-02T15:04:05.999999999"`
+	utcDateTime     = "2006-01-02T15:04:05.999999999"
+	dateTimeJSON    = `"` + time.RFC3339Nano + `"`
+)
 
-func (t timeRFC3339) MarshalJSON() (json []byte, err error) {
+type dateTimeRFC3339 time.Time
+
+func (t dateTimeRFC3339) MarshalJSON() ([]byte, error) {
 	tt := time.Time(t)
 	return tt.MarshalJSON()
 }
 
-func (t timeRFC3339) MarshalText() (text []byte, err error) {
+func (t dateTimeRFC3339) MarshalText() ([]byte, error) {
 	tt := time.Time(t)
 	return tt.MarshalText()
 }
 
-func (t *timeRFC3339) UnmarshalJSON(data []byte) error {
-	layout := utcLayoutJSON
+func (t *dateTimeRFC3339) UnmarshalJSON(data []byte) error {
+	layout := utcDateTimeJSON
 	if tzOffsetRegex.Match(data) {
-		layout = rfc3339JSON
+		layout = dateTimeJSON
 	}
 	return t.Parse(layout, string(data))
 }
 
-func (t *timeRFC3339) UnmarshalText(data []byte) (err error) {
-	layout := utcLayout
+func (t *dateTimeRFC3339) UnmarshalText(data []byte) error {
+	layout := utcDateTime
 	if tzOffsetRegex.Match(data) {
 		layout = time.RFC3339Nano
 	}
 	return t.Parse(layout, string(data))
 }
 
-func (t *timeRFC3339) Parse(layout, value string) error {
+func (t *dateTimeRFC3339) Parse(layout, value string) error {
 	p, err := time.Parse(layout, strings.ToUpper(value))
-	*t = timeRFC3339(p)
+	*t = dateTimeRFC3339(p)
 	return err
 }
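
A companion in-package sketch showing how the renamed RFC3339 helper picks its layout via tzOffsetRegex (again unexported, so inside package generated only):

// Assumes "time" is imported alongside the existing imports.
func exampleDateTimeRFC3339() {
	var withZone, withoutZone dateTimeRFC3339

	// Trailing 'Z' matches tzOffsetRegex, so time.RFC3339Nano is used.
	_ = withZone.UnmarshalText([]byte("2024-01-16T22:48:46.123456789Z"))

	// No zone suffix, so the utcDateTime layout is used and the value is read as UTC.
	_ = withoutZone.UnmarshalText([]byte("2024-01-16T22:48:46.123456789"))

	_ = time.Time(withZone).Equal(time.Time(withoutZone)) // true: same instant
}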
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go
index 144ea18e1a..1bd0e4de05 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go
@@ -3,14 +3,16 @@
 
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License. See License.txt in the project root for license information.
-// Code generated by Microsoft (R) AutoRest Code Generator.
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
 // Changes may cause incorrect behavior and will be lost if the code is regenerated.
-// DO NOT EDIT.
 
 package generated
 
 import (
 	"encoding/xml"
+	"errors"
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
+	"io"
 	"strings"
 )
 
@@ -19,22 +21,32 @@ type additionalProperties map[string]*string
 // UnmarshalXML implements the xml.Unmarshaler interface for additionalProperties.
 func (ap *additionalProperties) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
 	tokName := ""
-	for t, err := d.Token(); err == nil; t, err = d.Token() {
+	tokValue := ""
+	for {
+		t, err := d.Token()
+		if errors.Is(err, io.EOF) {
+			break
+		} else if err != nil {
+			return err
+		}
 		switch tt := t.(type) {
 		case xml.StartElement:
 			tokName = strings.ToLower(tt.Name.Local)
-			break
+			tokValue = ""
 		case xml.CharData:
+			if tokName == "" {
+				continue
+			}
+			tokValue = string(tt)
+		case xml.EndElement:
 			if tokName == "" {
 				continue
 			}
 			if *ap == nil {
 				*ap = additionalProperties{}
 			}
-			s := string(tt)
-			(*ap)[tokName] = &s
+			(*ap)[tokName] = to.Ptr(tokValue)
 			tokName = ""
-			break
 		}
 	}
 	return nil
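
The rewritten loop now propagates decoder errors, commits each value on the element's closing tag, and therefore also records empty elements. A small in-package sketch (additionalProperties is unexported, so this only compiles inside package generated):

// Assumes "encoding/xml" and "fmt" are imported alongside the existing imports.
func exampleAdditionalProperties() {
	var ap additionalProperties
	data := `<Metadata><Owner>alice</Owner><Empty/></Metadata>`
	if err := xml.Unmarshal([]byte(data), &ap); err != nil {
		panic(err)
	}
	fmt.Println(*ap["owner"]) // "alice"  (element names are lowercased)
	fmt.Println(*ap["empty"]) // ""       (empty elements now yield empty values)
}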
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go
index 1de60999ec..c131facf7b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go
@@ -144,9 +144,6 @@ func ParseConnectionString(connectionString string) (ParsedConnectionString, err
 
 // SerializeBlobTags converts tags to generated.BlobTags
 func SerializeBlobTags(tagsMap map[string]string) *generated.BlobTags {
-	if len(tagsMap) == 0 {
-		return nil
-	}
 	blobTagSet := make([]*generated.BlobTag, 0)
 	for key, val := range tagsMap {
 		newKey, newVal := key, val
@@ -257,3 +254,27 @@ func IsIPEndpointStyle(host string) bool {
 	}
 	return net.ParseIP(host) != nil
 }
+
+// ReadAtLeast reads from r into buf until it has read at least min bytes.
+// It returns the number of bytes copied and an error.
+// The EOF error is returned if no bytes were read or
+// EOF happened after reading fewer than min bytes.
+// If min is greater than the length of buf, ReadAtLeast returns ErrShortBuffer.
+// On return, n >= min if and only if err == nil.
+// If r returns an error having read at least min bytes, the error is dropped.
+// This method is the same as io.ReadAtLeast except that it does not
+// return io.ErrUnexpectedEOF when fewer than min bytes are read.
+func ReadAtLeast(r io.Reader, buf []byte, min int) (n int, err error) {
+	if len(buf) < min {
+		return 0, io.ErrShortBuffer
+	}
+	for n < min && err == nil {
+		var nn int
+		nn, err = r.Read(buf[n:])
+		n += nn
+	}
+	if n >= min {
+		err = nil
+	}
+	return
+}
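
A short sketch contrasting the new helper with io.ReadAtLeast (the shared package is internal, so this is only callable from inside the module; the reader here is illustrative):

// Assumes an in-module caller importing "fmt", "io", "strings" and the internal shared package.
func exampleReadAtLeast() {
	buf := make([]byte, 8)

	n, err := shared.ReadAtLeast(strings.NewReader("abc"), buf, 8)
	fmt.Println(n, err) // 3 EOF            - a short read is reported as plain io.EOF

	n, err = io.ReadAtLeast(strings.NewReader("abc"), buf, 8)
	fmt.Println(n, err) // 3 unexpected EOF - the stdlib converts it to io.ErrUnexpectedEOF
}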
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go
index 7e534cee18..14e90a1fd6 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go
@@ -8,6 +8,7 @@ package pageblob
 
 import (
 	"context"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
 	"io"
 	"net/http"
 	"net/url"
@@ -426,6 +427,12 @@ func (pb *Client) CopyFromURL(ctx context.Context, copySource string, o *blob.Co
 	return pb.BlobClient().CopyFromURL(ctx, copySource, o)
 }
 
+// GetSASURL is a convenience method for generating a SAS token for the currently pointed at Page blob.
+// It can only be used if the credential supplied during creation was a SharedKeyCredential.
+func (pb *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o *blob.GetSASURLOptions) (string, error) {
+	return pb.BlobClient().GetSASURL(permissions, expiry, o)
+}
+
 // Concurrent Download Functions -----------------------------------------------------------------------------------------
 
 // DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
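
A hedged usage sketch for the new convenience method; pbClient is assumed to be a *pageblob.Client built from a SharedKeyCredential, which the doc comment requires:

package main // illustrative caller, not part of the patch

import (
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)

// pageBlobReadSAS returns a read-only SAS URL for the page blob, valid for one hour.
func pageBlobReadSAS(pbClient *pageblob.Client) (string, error) {
	return pbClient.GetSASURL(sas.BlobPermissions{Read: true}, time.Now().Add(time.Hour), nil)
}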
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go
index 11263822be..2221e60c43 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go
@@ -82,6 +82,39 @@ func isMatchingScopes(scopesOne []string, scopesTwo string) bool {
 	return scopeCounter == len(scopesOne)
 }
 
+// needsUpgrade returns true if the given key follows the v1.0 schema i.e.,
+// it contains an uppercase character (v1.1+ keys are all lowercase)
+func needsUpgrade(key string) bool {
+	for _, r := range key {
+		if 'A' <= r && r <= 'Z' {
+			return true
+		}
+	}
+	return false
+}
+
+// upgrade a v1.0 cache item by adding a v1.1+ item having the same value and deleting
+// the v1.0 item. Callers must hold an exclusive lock on m.
+func upgrade[T any](m map[string]T, k string) T {
+	v1_1Key := strings.ToLower(k)
+	v, ok := m[k]
+	if !ok {
+		// another goroutine did the upgrade while this one was waiting for the write lock
+		return m[v1_1Key]
+	}
+	if v2, ok := m[v1_1Key]; ok {
+		// cache has an equivalent v1.1+ item, which we prefer because we know it was added
+		// by a newer version of the module and is therefore more likely to remain valid.
+		// The v1.0 item may have expired because only v1.0 or earlier would update it.
+		v = v2
+	} else {
+		// add an equivalent item according to the v1.1 schema
+		m[v1_1Key] = v
+	}
+	delete(m, k)
+	return v
+}
+
 // Read reads a storage token from the cache if it exists.
 func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) {
 	tr := TokenResponse{}
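
A toy sketch of the new key-upgrade path (the real maps hold cache-contract values and their callers hold contractMu, so the map and key here are purely illustrative):

// Assumes "fmt" is imported; lives in this package because the helpers are unexported.
func exampleUpgrade() {
	tokens := map[string]string{"HomeID-login.microsoftonline.com-AccessToken": "v1 token"}

	k := "HomeID-login.microsoftonline.com-AccessToken"
	if needsUpgrade(k) { // true: the key still contains uppercase characters (v1.0 schema)
		v := upgrade(tokens, k) // re-keys the entry under the lowercased (v1.1+) key
		fmt.Println(v)          // "v1 token"
	}
	fmt.Println(len(tokens)) // 1: only the lowercased key remains
}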
@@ -255,21 +288,25 @@ func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info)
 
 func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string, tokenType, authnSchemeKeyID string) AccessToken {
 	m.contractMu.RLock()
-	defer m.contractMu.RUnlock()
 	// TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens.
 	// this shows up as the dominating node in a profile. for real-world scenarios this likely isn't
 	// an issue, however if it does become a problem then we know where to look.
-	for _, at := range m.contract.AccessTokens {
+	for k, at := range m.contract.AccessTokens {
 		if at.HomeAccountID == homeID && at.Realm == realm && at.ClientID == clientID {
-			if (at.TokenType == tokenType && at.AuthnSchemeKeyID == authnSchemeKeyID) || (at.TokenType == "" && (tokenType == "" || tokenType == "Bearer")) {
-				if checkAlias(at.Environment, envAliases) {
-					if isMatchingScopes(scopes, at.Scopes) {
-						return at
+			if (strings.EqualFold(at.TokenType, tokenType) && at.AuthnSchemeKeyID == authnSchemeKeyID) || (at.TokenType == "" && (tokenType == "" || tokenType == "Bearer")) {
+				if checkAlias(at.Environment, envAliases) && isMatchingScopes(scopes, at.Scopes) {
+					m.contractMu.RUnlock()
+					if needsUpgrade(k) {
+						m.contractMu.Lock()
+						defer m.contractMu.Unlock()
+						at = upgrade(m.contract.AccessTokens, k)
 					}
+					return at
 				}
 			}
 		}
 	}
+	m.contractMu.RUnlock()
 	return AccessToken{}
 }
 
@@ -310,15 +347,21 @@ func (m *Manager) readRefreshToken(homeID string, envAliases []string, familyID,
 	// If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response).
 	// https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95
 	m.contractMu.RLock()
-	defer m.contractMu.RUnlock()
 	for _, matcher := range matchers {
-		for _, rt := range m.contract.RefreshTokens {
+		for k, rt := range m.contract.RefreshTokens {
 			if matcher(rt) {
+				m.contractMu.RUnlock()
+				if needsUpgrade(k) {
+					m.contractMu.Lock()
+					defer m.contractMu.Unlock()
+					rt = upgrade(m.contract.RefreshTokens, k)
+				}
 				return rt, nil
 			}
 		}
 	}
 
+	m.contractMu.RUnlock()
 	return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found")
 }
 
@@ -340,14 +383,20 @@ func (m *Manager) writeRefreshToken(refreshToken accesstokens.RefreshToken) erro
 
 func (m *Manager) readIDToken(homeID string, envAliases []string, realm, clientID string) (IDToken, error) {
 	m.contractMu.RLock()
-	defer m.contractMu.RUnlock()
-	for _, idt := range m.contract.IDTokens {
+	for k, idt := range m.contract.IDTokens {
 		if idt.HomeAccountID == homeID && idt.Realm == realm && idt.ClientID == clientID {
 			if checkAlias(idt.Environment, envAliases) {
+				m.contractMu.RUnlock()
+				if needsUpgrade(k) {
+					m.contractMu.Lock()
+					defer m.contractMu.Unlock()
+					idt = upgrade(m.contract.IDTokens, k)
+				}
 				return idt, nil
 			}
 		}
 	}
+	m.contractMu.RUnlock()
 	return IDToken{}, fmt.Errorf("token not found")
 }
 
@@ -386,7 +435,6 @@ func (m *Manager) Account(homeAccountID string) shared.Account {
 
 func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm string) (shared.Account, error) {
 	m.contractMu.RLock()
-	defer m.contractMu.RUnlock()
 
 	// You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key.
 	// We only use a map because the storage contract shared between all language implementations says use a map.
@@ -394,11 +442,18 @@ func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm s
 	// a match in multiple envs (envAlias). That means we either need to hash each possible key and do the lookup
 	// or just statically check.  Since the design is to have a storage.Manager per user, the amount of keys stored
 	// is really low (say 2).  Each hash is more expensive than the entire iteration.
-	for _, acc := range m.contract.Accounts {
+	for k, acc := range m.contract.Accounts {
 		if acc.HomeAccountID == homeAccountID && checkAlias(acc.Environment, envAliases) && acc.Realm == realm {
+			m.contractMu.RUnlock()
+			if needsUpgrade(k) {
+				m.contractMu.Lock()
+				defer m.contractMu.Unlock()
+				acc = upgrade(m.contract.Accounts, k)
+			}
 			return acc, nil
 		}
 	}
+	m.contractMu.RUnlock()
 	return shared.Account{}, fmt.Errorf("account not found")
 }
 
@@ -412,13 +467,18 @@ func (m *Manager) writeAccount(account shared.Account) error {
 
 func (m *Manager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) {
 	m.contractMu.RLock()
-	defer m.contractMu.RUnlock()
-
-	for _, app := range m.contract.AppMetaData {
+	for k, app := range m.contract.AppMetaData {
 		if checkAlias(app.Environment, envAliases) && app.ClientID == clientID {
+			m.contractMu.RUnlock()
+			if needsUpgrade(k) {
+				m.contractMu.Lock()
+				defer m.contractMu.Unlock()
+				app = upgrade(m.contract.AppMetaData, k)
+			}
 			return app, nil
 		}
 	}
+	m.contractMu.RUnlock()
 	return AppMetaData{}, fmt.Errorf("not found")
 }
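The storage.go changes above replace defer-held read locks with an explicit release-then-upgrade: on a cache hit whose key still uses the v1.0 mixed-case schema, the read lock is dropped, the write lock is taken, and upgrade re-checks the map because another goroutine may have migrated the entry in the meantime. Below is a minimal, generic sketch of that lock-upgrade-and-recheck pattern, assuming a toy string cache; the names here are illustrative, not MSAL's:

package main

import (
	"fmt"
	"strings"
	"sync"
)

// cache stands in for the token cache contract: a read-mostly map guarded by an RWMutex.
type cache struct {
	mu    sync.RWMutex
	items map[string]string
}

// get looks up k under the read lock. If the entry was stored under an old,
// mixed-case key, it releases the read lock, takes the write lock, and
// re-checks the map before rewriting the key, because another goroutine may
// have migrated the entry while this one waited for the write lock.
func (c *cache) get(k string) (string, bool) {
	c.mu.RLock()
	v, ok := c.items[k]
	if !ok || k == strings.ToLower(k) {
		c.mu.RUnlock()
		return v, ok
	}
	c.mu.RUnlock()

	c.mu.Lock()
	defer c.mu.Unlock()
	lower := strings.ToLower(k)
	if v2, ok2 := c.items[lower]; ok2 {
		// Another goroutine already migrated it; prefer the newer entry.
		v = v2
	} else {
		c.items[lower] = v
	}
	delete(c.items, k)
	return v, true
}

func main() {
	c := &cache{items: map[string]string{"OldKey": "token"}}
	v, _ := c.get("OldKey") // migrates "OldKey" -> "oldkey"
	fmt.Println(v, c.items)
}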
 
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
index 2221b3d339..e346ff3dff 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
@@ -51,7 +51,7 @@ type AuthenticationScheme = authority.AuthenticationScheme
 
 type Account = shared.Account
 
-var errNoAccount = errors.New("no account was specified with public.WithAccount(), or the specified account is invalid")
+var errNoAccount = errors.New("no account was specified with public.WithSilentAccount(), or the specified account is invalid")
 
 // clientOptions configures the Client's behavior.
 type clientOptions struct {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go
index 1ee54cfe0f..2264200c16 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go
@@ -170,8 +170,7 @@ func NewConfig() *Config {
 	return &Config{}
 }
 
-// Copy will return a shallow copy of the Config object. If any additional
-// configurations are provided they will be merged into the new config returned.
+// Copy will return a shallow copy of the Config object.
 func (c Config) Copy() Config {
 	cp := c
 	return cp
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
index 9a844f30e5..66d0963030 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
@@ -3,4 +3,4 @@
 package aws
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.24.0"
+const goModuleVersion = "1.24.1"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
index 722ca34c6a..dc703d482d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
@@ -328,10 +328,12 @@ func AddRetryMiddlewares(stack *smithymiddle.Stack, options AddRetryMiddlewaresO
 		middleware.LogAttempts = options.LogRetryAttempts
 	})
 
-	if err := stack.Finalize.Add(attempt, smithymiddle.After); err != nil {
+	// index retry to before signing, if signing exists
+	if err := stack.Finalize.Insert(attempt, "Signing", smithymiddle.Before); err != nil {
 		return err
 	}
-	if err := stack.Finalize.Add(&MetricsHeader{}, smithymiddle.After); err != nil {
+
+	if err := stack.Finalize.Insert(&MetricsHeader{}, attempt.ID(), smithymiddle.After); err != nil {
 		return err
 	}
 	return nil
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
index 79eae3632e..0cd31f00ec 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
@@ -1,3 +1,15 @@
+# v1.26.4 (2024-01-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.3 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.2 (2023-12-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.26.1 (2023-12-08)
 
 * **Bug Fix**: Correct loading of [services *] sections into shared config.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
index b7c325d3ea..6de4538a4d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
@@ -3,4 +3,4 @@
 package config
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.26.1"
+const goModuleVersion = "1.26.4"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
index dd7af71d18..6671a41c92 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
@@ -1,3 +1,15 @@
+# v1.16.15 (2024-01-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.14 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.13 (2023-12-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.16.12 (2023-12-08)
 
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go
new file mode 100644
index 0000000000..c3f5dadcec
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go
@@ -0,0 +1,48 @@
+package client
+
+import (
+	"context"
+	"github.com/aws/smithy-go/middleware"
+)
+
+type getIdentityMiddleware struct {
+	options Options
+}
+
+func (*getIdentityMiddleware) ID() string {
+	return "GetIdentity"
+}
+
+func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	return next.HandleFinalize(ctx, in)
+}
+
+type signRequestMiddleware struct {
+}
+
+func (*signRequestMiddleware) ID() string {
+	return "Signing"
+}
+
+func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	return next.HandleFinalize(ctx, in)
+}
+
+type resolveAuthSchemeMiddleware struct {
+	operation string
+	options   Options
+}
+
+func (*resolveAuthSchemeMiddleware) ID() string {
+	return "ResolveAuthScheme"
+}
+
+func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go
index df0e7575c4..9a869f8954 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go
@@ -101,6 +101,7 @@ func (c *Client) GetCredentials(ctx context.Context, params *GetCredentialsInput
 	stack.Serialize.Add(&serializeOpGetCredential{}, smithymiddleware.After)
 	stack.Build.Add(&buildEndpoint{Endpoint: options.Endpoint}, smithymiddleware.After)
 	stack.Deserialize.Add(&deserializeOpGetCredential{}, smithymiddleware.After)
+	addProtocolFinalizerMiddlewares(stack, options, "GetCredentials")
 	retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{Retryer: options.Retryer})
 	middleware.AddSDKAgentKey(middleware.FeatureMetadata, ServiceID)
 	smithyhttp.AddErrorCloseResponseBodyMiddleware(stack)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go
new file mode 100644
index 0000000000..748ee67244
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go
@@ -0,0 +1,20 @@
+package client
+
+import (
+	"context"
+	"github.com/aws/smithy-go/middleware"
+)
+
+type resolveEndpointV2Middleware struct {
+	options Options
+}
+
+func (*resolveEndpointV2Middleware) ID() string {
+	return "ResolveEndpointV2"
+}
+
+func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go
index ddb28a66d1..f2820d20ea 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go
@@ -146,3 +146,19 @@ func stof(code int) smithy.ErrorFault {
 	}
 	return smithy.FaultClient
 }
+
+func addProtocolFinalizerMiddlewares(stack *smithymiddleware.Stack, options Options, operation string) error {
+	if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, smithymiddleware.Before); err != nil {
+		return fmt.Errorf("add ResolveAuthScheme: %w", err)
+	}
+	if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", smithymiddleware.After); err != nil {
+		return fmt.Errorf("add GetIdentity: %w", err)
+	}
+	if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", smithymiddleware.After); err != nil {
+		return fmt.Errorf("add ResolveEndpointV2: %w", err)
+	}
+	if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", smithymiddleware.After); err != nil {
+		return fmt.Errorf("add Signing: %w", err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
index ec3eb5f6e8..c8bcd2dceb 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
@@ -3,4 +3,4 @@
 package credentials
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.16.12"
+const goModuleVersion = "1.16.15"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
index eef77e9d5b..40c317a967 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.14.11 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.14.10 (2023-12-07)
 
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go
index 9e3bdb0e66..af58b6bb10 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go
@@ -56,6 +56,7 @@ type GetDynamicDataOutput struct {
 func addGetDynamicDataMiddleware(stack *middleware.Stack, options Options) error {
 	return addAPIRequestMiddleware(stack,
 		options,
+		"GetDynamicData",
 		buildGetDynamicDataPath,
 		buildGetDynamicDataOutput)
 }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go
index 24845dccd6..5111cc90ca 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go
@@ -53,6 +53,7 @@ type GetIAMInfoOutput struct {
 func addGetIAMInfoMiddleware(stack *middleware.Stack, options Options) error {
 	return addAPIRequestMiddleware(stack,
 		options,
+		"GetIAMInfo",
 		buildGetIAMInfoPath,
 		buildGetIAMInfoOutput,
 	)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go
index a87758ed30..dc8c09edf0 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go
@@ -54,6 +54,7 @@ type GetInstanceIdentityDocumentOutput struct {
 func addGetInstanceIdentityDocumentMiddleware(stack *middleware.Stack, options Options) error {
 	return addAPIRequestMiddleware(stack,
 		options,
+		"GetInstanceIdentityDocument",
 		buildGetInstanceIdentityDocumentPath,
 		buildGetInstanceIdentityDocumentOutput,
 	)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go
index cb0ce4c000..869bfc9feb 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go
@@ -56,6 +56,7 @@ type GetMetadataOutput struct {
 func addGetMetadataMiddleware(stack *middleware.Stack, options Options) error {
 	return addAPIRequestMiddleware(stack,
 		options,
+		"GetMetadata",
 		buildGetMetadataPath,
 		buildGetMetadataOutput)
 }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go
index 7b9b48912a..8c0572bb5c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go
@@ -45,6 +45,7 @@ type GetRegionOutput struct {
 func addGetRegionMiddleware(stack *middleware.Stack, options Options) error {
 	return addAPIRequestMiddleware(stack,
 		options,
+		"GetRegion",
 		buildGetInstanceIdentityDocumentPath,
 		buildGetRegionOutput,
 	)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go
index 841f802c1a..1f9ee97a5b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go
@@ -49,6 +49,7 @@ func addGetTokenMiddleware(stack *middleware.Stack, options Options) error {
 	err := addRequestMiddleware(stack,
 		options,
 		"PUT",
+		"GetToken",
 		buildGetTokenPath,
 		buildGetTokenOutput)
 	if err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go
index 88aa61e9ad..8903697244 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go
@@ -45,6 +45,7 @@ type GetUserDataOutput struct {
 func addGetUserDataMiddleware(stack *middleware.Stack, options Options) error {
 	return addAPIRequestMiddleware(stack,
 		options,
+		"GetUserData",
 		buildGetUserDataPath,
 		buildGetUserDataOutput)
 }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go
new file mode 100644
index 0000000000..ad283cf825
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go
@@ -0,0 +1,48 @@
+package imds
+
+import (
+	"context"
+	"github.com/aws/smithy-go/middleware"
+)
+
+type getIdentityMiddleware struct {
+	options Options
+}
+
+func (*getIdentityMiddleware) ID() string {
+	return "GetIdentity"
+}
+
+func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	return next.HandleFinalize(ctx, in)
+}
+
+type signRequestMiddleware struct {
+}
+
+func (*signRequestMiddleware) ID() string {
+	return "Signing"
+}
+
+func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	return next.HandleFinalize(ctx, in)
+}
+
+type resolveAuthSchemeMiddleware struct {
+	operation string
+	options   Options
+}
+
+func (*resolveAuthSchemeMiddleware) ID() string {
+	return "ResolveAuthScheme"
+}
+
+func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go
new file mode 100644
index 0000000000..d7540da348
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go
@@ -0,0 +1,20 @@
+package imds
+
+import (
+	"context"
+	"github.com/aws/smithy-go/middleware"
+)
+
+type resolveEndpointV2Middleware struct {
+	options Options
+}
+
+func (*resolveEndpointV2Middleware) ID() string {
+	return "ResolveEndpointV2"
+}
+
+func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
index ce3e311186..0d747b213f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
@@ -3,4 +3,4 @@
 package imds
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.14.10"
+const goModuleVersion = "1.14.11"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go
index c8abd64916..fc948c27d8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go
@@ -17,10 +17,11 @@ import (
 
 func addAPIRequestMiddleware(stack *middleware.Stack,
 	options Options,
+	operation string,
 	getPath func(interface{}) (string, error),
 	getOutput func(*smithyhttp.Response) (interface{}, error),
 ) (err error) {
-	err = addRequestMiddleware(stack, options, "GET", getPath, getOutput)
+	err = addRequestMiddleware(stack, options, "GET", operation, getPath, getOutput)
 	if err != nil {
 		return err
 	}
@@ -44,6 +45,7 @@ func addAPIRequestMiddleware(stack *middleware.Stack,
 func addRequestMiddleware(stack *middleware.Stack,
 	options Options,
 	method string,
+	operation string,
 	getPath func(interface{}) (string, error),
 	getOutput func(*smithyhttp.Response) (interface{}, error),
 ) (err error) {
@@ -101,6 +103,10 @@ func addRequestMiddleware(stack *middleware.Stack,
 		return err
 	}
 
+	if err := addProtocolFinalizerMiddlewares(stack, options, operation); err != nil {
+		return fmt.Errorf("add protocol finalizers: %w", err)
+	}
+
 	// Retry support
 	return retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{
 		Retryer:          options.Retryer,
@@ -283,3 +289,19 @@ func appendURIPath(base, add string) string {
 	}
 	return reqPath
 }
+
+func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error {
+	if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil {
+		return fmt.Errorf("add ResolveAuthScheme: %w", err)
+	}
+	if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil {
+		return fmt.Errorf("add GetIdentity: %w", err)
+	}
+	if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil {
+		return fmt.Errorf("add ResolveEndpointV2: %w", err)
+	}
+	if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil {
+		return fmt.Errorf("add Signing: %w", err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md
index 64776bda64..4ab05fda6b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md
@@ -1,3 +1,23 @@
+# v1.15.12 (2024-01-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.11 (2024-01-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.9 (2023-12-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.8 (2023-12-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.15.7 (2023-12-08)
 
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go
index dc9bdc397d..ab1332ac2e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go
@@ -3,4 +3,4 @@
 package manager
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.15.7"
+const goModuleVersion = "1.15.12"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
index 5ceb3b82f4..dc87ec4102 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.2.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.2.9 (2023-12-07)
 
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
index da7d0d813e..41ee0bfbe3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
@@ -3,4 +3,4 @@
 package configsources
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.2.9"
+const goModuleVersion = "1.2.10"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json
index ab107ca551..f376f6908a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json
@@ -50,6 +50,9 @@
       "ca-central-1" : {
         "description" : "Canada (Central)"
       },
+      "ca-west-1" : {
+        "description" : "Canada West (Calgary)"
+      },
       "eu-central-1" : {
         "description" : "Europe (Frankfurt)"
       },
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
index 761cc992b8..e0265474c4 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v2.5.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v2.5.9 (2023-12-07)
 
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
index caabf668d4..bec2c6a1e9 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
@@ -3,4 +3,4 @@
 package endpoints
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "2.5.9"
+const goModuleVersion = "2.5.10"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md
index 982bc97c2a..8aa94972d3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.2.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.2.9 (2023-12-07)
 
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go
index 4bfe4abd06..2a5888c1b9 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go
@@ -3,4 +3,4 @@
 package v4a
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.2.9"
+const goModuleVersion = "1.2.10"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md
index d27305250e..8f97403617 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.2.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.2.9 (2023-12-07)
 
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go
index 8076821c41..a88534d2a7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go
@@ -3,4 +3,4 @@
 package checksum
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.2.9"
+const goModuleVersion = "1.2.10"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go
index 610e7ca80e..1b727acbe1 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_add.go
@@ -90,6 +90,19 @@ func AddInputMiddleware(stack *middleware.Stack, options InputMiddlewareOptions)
 		return err
 	}
 
+	// If trailing checksum is not supported no need for finalize handler to be added.
+	if options.EnableTrailingChecksum {
+		trailerMiddleware := &addInputChecksumTrailer{
+			EnableTrailingChecksum:           inputChecksum.EnableTrailingChecksum,
+			RequireChecksum:                  inputChecksum.RequireChecksum,
+			EnableComputePayloadHash:         inputChecksum.EnableComputePayloadHash,
+			EnableDecodedContentLengthHeader: inputChecksum.EnableDecodedContentLengthHeader,
+		}
+		if err := stack.Finalize.Insert(trailerMiddleware, "Retry", middleware.After); err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go
index c7740658ae..7ffca33f0e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/middleware_compute_input_checksum.go
@@ -75,6 +75,8 @@ type computeInputPayloadChecksum struct {
 	useTrailer bool
 }
 
+type useTrailer struct{}
+
 // ID provides the middleware's identifier.
 func (m *computeInputPayloadChecksum) ID() string {
 	return "AWSChecksum:ComputeInputPayloadChecksum"
@@ -178,15 +180,9 @@ func (m *computeInputPayloadChecksum) HandleFinalize(
 				// ContentSHA256Header middleware handles the header
 				ctx = v4.SetPayloadHash(ctx, streamingUnsignedPayloadTrailerPayloadHash)
 			}
-
 			m.useTrailer = true
-			mw := &addInputChecksumTrailer{
-				EnableTrailingChecksum:           m.EnableTrailingChecksum,
-				RequireChecksum:                  m.RequireChecksum,
-				EnableComputePayloadHash:         m.EnableComputePayloadHash,
-				EnableDecodedContentLengthHeader: m.EnableDecodedContentLengthHeader,
-			}
-			return mw.HandleFinalize(ctx, in, next)
+			ctx = middleware.WithStackValue(ctx, useTrailer{}, true)
+			return next.HandleFinalize(ctx, in)
 		}
 
 		// If trailing checksums are not enabled but protocol is still HTTPS
@@ -268,6 +264,9 @@ func (m *addInputChecksumTrailer) HandleFinalize(
 ) (
 	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
 ) {
+	if enabled, _ := middleware.GetStackValue(ctx, useTrailer{}).(bool); !enabled {
+		return next.HandleFinalize(ctx, in)
+	}
 	req, ok := in.Request.(*smithyhttp.Request)
 	if !ok {
 		return out, metadata, computeInputTrailingChecksumError{
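The checksum change above stops calling addInputChecksumTrailer directly and instead records the decision with middleware.WithStackValue, which the trailer middleware (now registered once in AddInputMiddleware, after "Retry") reads back via middleware.GetStackValue and otherwise no-ops. Below is a simplified sketch of the same flag-through-context pattern using only the standard context package rather than smithy-go's stack-scoped helpers:

package main

import (
	"context"
	"fmt"
)

// useTrailer mirrors the unexported key type in the hunk above: an empty
// struct used as a context key so only this package can set or read the flag.
type useTrailer struct{}

// computeChecksum decides whether a trailing checksum is needed and records
// that decision on the context instead of calling the trailer step directly.
func computeChecksum(ctx context.Context) context.Context {
	return context.WithValue(ctx, useTrailer{}, true)
}

// addTrailer runs unconditionally later in the chain, but becomes a no-op
// unless the earlier step set the flag.
func addTrailer(ctx context.Context) {
	if enabled, _ := ctx.Value(useTrailer{}).(bool); !enabled {
		fmt.Println("trailer skipped")
		return
	}
	fmt.Println("trailer added")
}

func main() {
	addTrailer(context.Background())                  // trailer skipped
	addTrailer(computeChecksum(context.Background())) // trailer added
}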
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
index 1191b30c69..a65890b58f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.10.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.10.9 (2023-12-07)
 
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
index aacb4dd248..073e8866b7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
@@ -3,4 +3,4 @@
 package presignedurl
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.10.9"
+const goModuleVersion = "1.10.10"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md
index 3b15285f3f..c4df217651 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.16.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.16.9 (2023-12-07)
 
 * **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go
index 5f16c58593..986affe186 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go
@@ -3,4 +3,4 @@
 package s3shared
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.16.9"
+const goModuleVersion = "1.16.10"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md
index c16b702a6e..f83de2a5ef 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md
@@ -1,3 +1,19 @@
+# v1.48.0 (2024-01-05)
+
+* **Feature**: Support smithy sigv4a trait for codegen.
+
+# v1.47.8 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.47.7 (2023-12-20)
+
+* No change notes available for this release.
+
+# v1.47.6 (2023-12-18)
+
+* No change notes available for this release.
+
 # v1.47.5 (2023-12-08)
 
 * **Bug Fix**: Add non-vhostable buckets to request path when using legacy V1 endpoint resolver.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go
index a3fe93b7f2..5e5f27b2d7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go
@@ -777,6 +777,12 @@ func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, op
 	if _, ok := stack.Finalize.Get((*acceptencodingcust.DisableGzip)(nil).ID()); ok {
 		stack.Finalize.Remove((*acceptencodingcust.DisableGzip)(nil).ID())
 	}
+	if _, ok := stack.Finalize.Get((*retry.Attempt)(nil).ID()); ok {
+		stack.Finalize.Remove((*retry.Attempt)(nil).ID())
+	}
+	if _, ok := stack.Finalize.Get((*retry.MetricsHeader)(nil).ID()); ok {
+		stack.Finalize.Remove((*retry.MetricsHeader)(nil).ID())
+	}
 	stack.Deserialize.Clear()
 	stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID())
 	stack.Build.Remove("UserAgent")
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go
index 8a4f832f7f..6ef631bd38 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/auth.go
@@ -155,7 +155,15 @@ func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option {
 			}(),
 		},
 
-		{SchemeID: smithyauth.SchemeIDSigV4A},
+		{
+			SchemeID: smithyauth.SchemeIDSigV4A,
+			SignerProperties: func() smithy.Properties {
+				var props smithy.Properties
+				smithyhttp.SetSigV4ASigningName(&props, "s3")
+				smithyhttp.SetSigV4ASigningRegions(&props, []string{params.Region})
+				return props
+			}(),
+		},
 	}
 }
 
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go
index 3b7c6ddca6..77e3ee12fd 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go
@@ -3,4 +3,4 @@
 package s3
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.47.5"
+const goModuleVersion = "1.48.0"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go
index c7e5f6d2b2..f3e6b0751d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints/endpoints.go
@@ -282,6 +282,27 @@ var defaultPartitions = endpoints.Partitions{
 			}: {
 				Hostname: "s3.dualstack.ca-central-1.amazonaws.com",
 			},
+			endpoints.EndpointKey{
+				Region: "ca-west-1",
+			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region:  "ca-west-1",
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname: "s3-fips.ca-west-1.amazonaws.com",
+			},
+			endpoints.EndpointKey{
+				Region:  "ca-west-1",
+				Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+			}: {
+				Hostname: "s3-fips.dualstack.ca-west-1.amazonaws.com",
+			},
+			endpoints.EndpointKey{
+				Region:  "ca-west-1",
+				Variant: endpoints.DualStackVariant,
+			}: {
+				Hostname: "s3.dualstack.ca-west-1.amazonaws.com",
+			},
 			endpoints.EndpointKey{
 				Region: "eu-central-1",
 			}: endpoints.Endpoint{},
@@ -367,6 +388,15 @@ var defaultPartitions = endpoints.Partitions{
 				},
 				Deprecated: aws.TrueTernary,
 			},
+			endpoints.EndpointKey{
+				Region: "fips-ca-west-1",
+			}: endpoints.Endpoint{
+				Hostname: "s3-fips.ca-west-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "ca-west-1",
+				},
+				Deprecated: aws.TrueTernary,
+			},
 			endpoints.EndpointKey{
 				Region: "fips-us-east-1",
 			}: endpoints.Endpoint{
@@ -632,15 +662,61 @@ var defaultPartitions = endpoints.Partitions{
 		RegionRegex:    partitionRegexp.AwsIso,
 		IsRegionalized: true,
 		Endpoints: endpoints.Endpoints{
+			endpoints.EndpointKey{
+				Region: "fips-us-iso-east-1",
+			}: endpoints.Endpoint{
+				Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "us-iso-east-1",
+				},
+				Deprecated: aws.TrueTernary,
+			},
+			endpoints.EndpointKey{
+				Region: "fips-us-iso-west-1",
+			}: endpoints.Endpoint{
+				Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "us-iso-west-1",
+				},
+				Deprecated: aws.TrueTernary,
+			},
 			endpoints.EndpointKey{
 				Region: "us-iso-east-1",
 			}: endpoints.Endpoint{
 				Protocols:         []string{"http", "https"},
 				SignatureVersions: []string{"s3v4"},
 			},
+			endpoints.EndpointKey{
+				Region:  "us-iso-east-1",
+				Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+			}: {
+				Hostname:          "s3-fips.dualstack.us-iso-east-1.c2s.ic.gov",
+				Protocols:         []string{"http", "https"},
+				SignatureVersions: []string{"s3v4"},
+			},
+			endpoints.EndpointKey{
+				Region:  "us-iso-east-1",
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname:          "s3-fips.us-iso-east-1.c2s.ic.gov",
+				Protocols:         []string{"http", "https"},
+				SignatureVersions: []string{"s3v4"},
+			},
 			endpoints.EndpointKey{
 				Region: "us-iso-west-1",
 			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region:  "us-iso-west-1",
+				Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+			}: {
+				Hostname: "s3-fips.dualstack.us-iso-west-1.c2s.ic.gov",
+			},
+			endpoints.EndpointKey{
+				Region:  "us-iso-west-1",
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov",
+			},
 		},
 	},
 	{
@@ -664,9 +740,30 @@ var defaultPartitions = endpoints.Partitions{
 		RegionRegex:    partitionRegexp.AwsIsoB,
 		IsRegionalized: true,
 		Endpoints: endpoints.Endpoints{
+			endpoints.EndpointKey{
+				Region: "fips-us-isob-east-1",
+			}: endpoints.Endpoint{
+				Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "us-isob-east-1",
+				},
+				Deprecated: aws.TrueTernary,
+			},
 			endpoints.EndpointKey{
 				Region: "us-isob-east-1",
 			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region:  "us-isob-east-1",
+				Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+			}: {
+				Hostname: "s3-fips.dualstack.us-isob-east-1.sc2s.sgov.gov",
+			},
+			endpoints.EndpointKey{
+				Region:  "us-isob-east-1",
+				Variant: endpoints.FIPSVariant,
+			}: {
+				Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov",
+			},
 		},
 	},
 	{
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
index 7a4c30c59c..9d5847a052 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.18.6 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.18.5 (2023-12-08)
 
 * **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
index 52495f1fb6..d2e5a8ab8d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
@@ -3,4 +3,4 @@
 package sso
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.18.5"
+const goModuleVersion = "1.18.6"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
index 80df3bdde8..f77c4785e7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
@@ -1,3 +1,11 @@
+# v1.21.7 (2024-01-16)
+
+* No change notes available for this release.
+
+# v1.21.6 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.21.5 (2023-12-08)
 
 * **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
index 98eaaa6d81..474a574ec7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
@@ -3,4 +3,4 @@
 package ssooidc
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.21.5"
+const goModuleVersion = "1.21.7"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go
index c48da8b88a..cbd77fd291 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go
@@ -283,6 +283,14 @@ var defaultPartitions = endpoints.Partitions{
 					Region: "il-central-1",
 				},
 			},
+			endpoints.EndpointKey{
+				Region: "me-central-1",
+			}: endpoints.Endpoint{
+				Hostname: "oidc.me-central-1.amazonaws.com",
+				CredentialScope: endpoints.CredentialScope{
+					Region: "me-central-1",
+				},
+			},
 			endpoints.EndpointKey{
 				Region: "me-south-1",
 			}: endpoints.Endpoint{
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
index 17dd41f359..f9b6404d19 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
@@ -1,3 +1,11 @@
+# v1.26.7 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.6 (2023-12-20)
+
+* No change notes available for this release.
+
 # v1.26.5 (2023-12-08)
 
 * **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go
index 59cc4c70a3..369de83b8b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go
@@ -552,6 +552,12 @@ func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, op
 	if _, ok := stack.Finalize.Get((*acceptencodingcust.DisableGzip)(nil).ID()); ok {
 		stack.Finalize.Remove((*acceptencodingcust.DisableGzip)(nil).ID())
 	}
+	if _, ok := stack.Finalize.Get((*retry.Attempt)(nil).ID()); ok {
+		stack.Finalize.Remove((*retry.Attempt)(nil).ID())
+	}
+	if _, ok := stack.Finalize.Get((*retry.MetricsHeader)(nil).ID()); ok {
+		stack.Finalize.Remove((*retry.MetricsHeader)(nil).ID())
+	}
 	stack.Deserialize.Clear()
 	stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID())
 	stack.Build.Remove("UserAgent")
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
index 61667eb2c2..962c336cf9 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
@@ -3,4 +3,4 @@
 package sts
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.26.5"
+const goModuleVersion = "1.26.7"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go
index ca4c881909..3dbd993b54 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go
@@ -183,6 +183,9 @@ var defaultPartitions = endpoints.Partitions{
 			endpoints.EndpointKey{
 				Region: "ca-central-1",
 			}: endpoints.Endpoint{},
+			endpoints.EndpointKey{
+				Region: "ca-west-1",
+			}: endpoints.Endpoint{},
 			endpoints.EndpointKey{
 				Region: "eu-central-1",
 			}: endpoints.Endpoint{},
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
index 776e31b21d..c483e0cb8e 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -442,6 +442,17 @@ func (c *Config) WithUseDualStack(enable bool) *Config {
 	return c
 }
 
+// WithUseFIPSEndpoint sets a config UseFIPSEndpoint value returning a Config
+// pointer for chaining.
+func (c *Config) WithUseFIPSEndpoint(enable bool) *Config {
+	if enable {
+		c.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
+	} else {
+		c.UseFIPSEndpoint = endpoints.FIPSEndpointStateDisabled
+	}
+	return c
+}
+
 // WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
 // returning a Config pointer for chaining.
 func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
index 604aeffdeb..f1f9ba4ec5 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
@@ -2,6 +2,7 @@ package ec2metadata
 
 import (
 	"fmt"
+	"github.com/aws/aws-sdk-go/aws"
 	"net/http"
 	"sync/atomic"
 	"time"
@@ -65,7 +66,9 @@ func (t *tokenProvider) fetchTokenHandler(r *request.Request) {
 			switch requestFailureError.StatusCode() {
 			case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed:
 				atomic.StoreUint32(&t.disabled, 1)
-				t.client.Config.Logger.Log(fmt.Sprintf("WARN: failed to get session token, falling back to IMDSv1: %v", requestFailureError))
+				if t.client.Config.LogLevel.Matches(aws.LogDebugWithDeprecated) {
+					t.client.Config.Logger.Log(fmt.Sprintf("WARN: failed to get session token, falling back to IMDSv1: %v", requestFailureError))
+				}
 			case http.StatusBadRequest:
 				r.Error = requestFailureError
 			}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index b3d8f8c2c9..d8ee8b14ef 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -31,6 +31,7 @@ const (
 	ApSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta).
 	ApSoutheast4RegionID = "ap-southeast-4" // Asia Pacific (Melbourne).
 	CaCentral1RegionID   = "ca-central-1"   // Canada (Central).
+	CaWest1RegionID      = "ca-west-1"      // Canada West (Calgary).
 	EuCentral1RegionID   = "eu-central-1"   // Europe (Frankfurt).
 	EuCentral2RegionID   = "eu-central-2"   // Europe (Zurich).
 	EuNorth1RegionID     = "eu-north-1"     // Europe (Stockholm).
@@ -190,6 +191,9 @@ var awsPartition = partition{
 		"ca-central-1": region{
 			Description: "Canada (Central)",
 		},
+		"ca-west-1": region{
+			Description: "Canada West (Calgary)",
+		},
 		"eu-central-1": region{
 			Description: "Europe (Frankfurt)",
 		},
@@ -291,6 +295,9 @@ var awsPartition = partition{
 				}: endpoint{
 					Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com",
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -477,6 +484,24 @@ var awsPartition = partition{
 					},
 					Deprecated: boxedTrue,
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "acm-fips.ca-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ca-west-1-fips",
+				}: endpoint{
+					Hostname: "acm-fips.ca-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -1269,6 +1294,14 @@ var awsPartition = partition{
 						Region: "ca-central-1",
 					},
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{
+					Hostname: "api.ecr.ca-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+				},
 				endpointKey{
 					Region: "dkr-us-east-1",
 				}: endpoint{
@@ -1953,6 +1986,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -2251,6 +2287,15 @@ var awsPartition = partition{
 				}: endpoint{
 					Hostname: "apigateway-fips.ca-central-1.amazonaws.com",
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "apigateway-fips.ca-west-1.amazonaws.com",
+				},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -2284,6 +2329,15 @@ var awsPartition = partition{
 					},
 					Deprecated: boxedTrue,
 				},
+				endpointKey{
+					Region: "fips-ca-west-1",
+				}: endpoint{
+					Hostname: "apigateway-fips.ca-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "fips-us-east-1",
 				}: endpoint{
@@ -2442,6 +2496,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -2530,6 +2587,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -2735,6 +2795,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -3526,6 +3589,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -4005,6 +4071,15 @@ var awsPartition = partition{
 				}: endpoint{
 					Hostname: "autoscaling-fips.ca-central-1.amazonaws.com",
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "autoscaling-fips.ca-west-1.amazonaws.com",
+				},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -4038,6 +4113,15 @@ var awsPartition = partition{
 					},
 					Deprecated: boxedTrue,
 				},
+				endpointKey{
+					Region: "fips-ca-west-1",
+				}: endpoint{
+					Hostname: "autoscaling-fips.ca-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "fips-us-east-1",
 				}: endpoint{
@@ -4485,6 +4569,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -5095,6 +5182,15 @@ var awsPartition = partition{
 				}: endpoint{
 					Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com",
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com",
+				},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -5128,6 +5224,15 @@ var awsPartition = partition{
 					},
 					Deprecated: boxedTrue,
 				},
+				endpointKey{
+					Region: "fips-ca-west-1",
+				}: endpoint{
+					Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "fips-us-east-1",
 				}: endpoint{
@@ -5283,6 +5388,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -5442,6 +5550,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ap-south-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
 				endpointKey{
 					Region: "ap-southeast-1",
 				}: endpoint{},
@@ -5573,6 +5684,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -6156,6 +6270,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -6621,6 +6738,9 @@ var awsPartition = partition{
 		},
 		"cognito-identity": service{
 			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "ap-northeast-1",
 				}: endpoint{},
@@ -6639,6 +6759,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ap-southeast-2",
 				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
@@ -6745,6 +6868,9 @@ var awsPartition = partition{
 		},
 		"cognito-idp": service{
 			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "ap-northeast-1",
 				}: endpoint{},
@@ -6763,6 +6889,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ap-southeast-2",
 				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{},
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
@@ -7330,6 +7459,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -8301,6 +8433,15 @@ var awsPartition = partition{
 				}: endpoint{
 					Hostname: "datasync-fips.ca-central-1.amazonaws.com",
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "datasync-fips.ca-west-1.amazonaws.com",
+				},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -8334,6 +8475,15 @@ var awsPartition = partition{
 					},
 					Deprecated: boxedTrue,
 				},
+				endpointKey{
+					Region: "fips-ca-west-1",
+				}: endpoint{
+					Hostname: "datasync-fips.ca-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "fips-us-east-1",
 				}: endpoint{
@@ -8499,6 +8649,11 @@ var awsPartition = partition{
 				}: endpoint{
 					Hostname: "datazone-fips.ca-central-1.amazonaws.com",
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{
+					Hostname: "datazone.ca-west-1.api.aws",
+				},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{
@@ -8819,6 +8974,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -8992,6 +9150,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -9016,6 +9177,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "eu-west-3",
 				}: endpoint{},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "me-central-1",
 				}: endpoint{},
@@ -9077,6 +9241,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "dms",
 				}: endpoint{
@@ -9392,6 +9559,42 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "eu-west-3",
 				}: endpoint{},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "drs-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "drs-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "drs-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "drs-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "il-central-1",
 				}: endpoint{},
@@ -9407,15 +9610,39 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "us-east-1",
 				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "drs-fips.us-east-1.amazonaws.com",
+				},
 				endpointKey{
 					Region: "us-east-2",
 				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "drs-fips.us-east-2.amazonaws.com",
+				},
 				endpointKey{
 					Region: "us-west-1",
 				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "drs-fips.us-west-1.amazonaws.com",
+				},
 				endpointKey{
 					Region: "us-west-2",
 				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "drs-fips.us-west-2.amazonaws.com",
+				},
 			},
 		},
 		"ds": service{
@@ -9462,6 +9689,15 @@ var awsPartition = partition{
 				}: endpoint{
 					Hostname: "ds-fips.ca-central-1.amazonaws.com",
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ds-fips.ca-west-1.amazonaws.com",
+				},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -9495,6 +9731,15 @@ var awsPartition = partition{
 					},
 					Deprecated: boxedTrue,
 				},
+				endpointKey{
+					Region: "fips-ca-west-1",
+				}: endpoint{
+					Hostname: "ds-fips.ca-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "fips-us-east-1",
 				}: endpoint{
@@ -9639,6 +9884,24 @@ var awsPartition = partition{
 					},
 					Deprecated: boxedTrue,
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "dynamodb-fips.ca-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ca-west-1-fips",
+				}: endpoint{
+					Hostname: "dynamodb-fips.ca-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -9802,6 +10065,15 @@ var awsPartition = partition{
 				}: endpoint{
 					Hostname: "ebs-fips.ca-central-1.amazonaws.com",
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ebs-fips.ca-west-1.amazonaws.com",
+				},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -9835,6 +10107,15 @@ var awsPartition = partition{
 					},
 					Deprecated: boxedTrue,
 				},
+				endpointKey{
+					Region: "fips-ca-west-1",
+				}: endpoint{
+					Hostname: "ebs-fips.ca-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "fips-us-east-1",
 				}: endpoint{
@@ -10163,6 +10444,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -10344,6 +10628,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -10454,6 +10741,166 @@ var awsPartition = partition{
 				},
 			},
 		},
+		"eks-auth": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					DNSSuffix: "api.aws",
+				},
+				defaultKey{
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname:  "{service}-fips.{region}.{dnsSuffix}",
+					DNSSuffix: "api.aws",
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{
+					Hostname: "eks-auth.af-south-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-east-1",
+				}: endpoint{
+					Hostname: "eks-auth.ap-east-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-northeast-1",
+				}: endpoint{
+					Hostname: "eks-auth.ap-northeast-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-northeast-2",
+				}: endpoint{
+					Hostname: "eks-auth.ap-northeast-2.api.aws",
+				},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{
+					Hostname: "eks-auth.ap-northeast-3.api.aws",
+				},
+				endpointKey{
+					Region: "ap-south-1",
+				}: endpoint{
+					Hostname: "eks-auth.ap-south-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{
+					Hostname: "eks-auth.ap-south-2.api.aws",
+				},
+				endpointKey{
+					Region: "ap-southeast-1",
+				}: endpoint{
+					Hostname: "eks-auth.ap-southeast-1.api.aws",
+				},
+				endpointKey{
+					Region: "ap-southeast-2",
+				}: endpoint{
+					Hostname: "eks-auth.ap-southeast-2.api.aws",
+				},
+				endpointKey{
+					Region: "ap-southeast-3",
+				}: endpoint{
+					Hostname: "eks-auth.ap-southeast-3.api.aws",
+				},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{
+					Hostname: "eks-auth.ap-southeast-4.api.aws",
+				},
+				endpointKey{
+					Region: "ca-central-1",
+				}: endpoint{
+					Hostname: "eks-auth.ca-central-1.api.aws",
+				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{
+					Hostname: "eks-auth.ca-west-1.api.aws",
+				},
+				endpointKey{
+					Region: "eu-central-1",
+				}: endpoint{
+					Hostname: "eks-auth.eu-central-1.api.aws",
+				},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{
+					Hostname: "eks-auth.eu-central-2.api.aws",
+				},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{
+					Hostname: "eks-auth.eu-north-1.api.aws",
+				},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{
+					Hostname: "eks-auth.eu-south-1.api.aws",
+				},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{
+					Hostname: "eks-auth.eu-south-2.api.aws",
+				},
+				endpointKey{
+					Region: "eu-west-1",
+				}: endpoint{
+					Hostname: "eks-auth.eu-west-1.api.aws",
+				},
+				endpointKey{
+					Region: "eu-west-2",
+				}: endpoint{
+					Hostname: "eks-auth.eu-west-2.api.aws",
+				},
+				endpointKey{
+					Region: "eu-west-3",
+				}: endpoint{
+					Hostname: "eks-auth.eu-west-3.api.aws",
+				},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{
+					Hostname: "eks-auth.il-central-1.api.aws",
+				},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{
+					Hostname: "eks-auth.me-central-1.api.aws",
+				},
+				endpointKey{
+					Region: "me-south-1",
+				}: endpoint{
+					Hostname: "eks-auth.me-south-1.api.aws",
+				},
+				endpointKey{
+					Region: "sa-east-1",
+				}: endpoint{
+					Hostname: "eks-auth.sa-east-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-east-1",
+				}: endpoint{
+					Hostname: "eks-auth.us-east-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-east-2",
+				}: endpoint{
+					Hostname: "eks-auth.us-east-2.api.aws",
+				},
+				endpointKey{
+					Region: "us-west-1",
+				}: endpoint{
+					Hostname: "eks-auth.us-west-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-west-2",
+				}: endpoint{
+					Hostname: "eks-auth.us-west-2.api.aws",
+				},
+			},
+		},
 		"elasticache": service{
 			Endpoints: serviceEndpoints{
 				endpointKey{
@@ -10492,6 +10939,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -11295,6 +11745,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -11455,6 +11908,15 @@ var awsPartition = partition{
 				}: endpoint{
 					Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com",
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com",
+				},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{
@@ -11490,6 +11952,15 @@ var awsPartition = partition{
 					},
 					Deprecated: boxedTrue,
 				},
+				endpointKey{
+					Region: "fips-ca-west-1",
+				}: endpoint{
+					Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "fips-us-east-1",
 				}: endpoint{
@@ -12007,6 +12478,9 @@ var awsPartition = partition{
 					},
 					Deprecated: boxedTrue,
 				},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "me-south-1",
 				}: endpoint{},
@@ -12175,6 +12649,15 @@ var awsPartition = partition{
 				}: endpoint{
 					Hostname: "aos.ca-central-1.api.aws",
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "aos.ca-west-1.api.aws",
+				},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -12428,6 +12911,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -12678,6 +13164,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -14669,6 +15158,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ap-southeast-3",
 				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
@@ -14696,6 +15188,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "il-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "me-south-1",
 				}: endpoint{},
@@ -15150,6 +15645,11 @@ var awsPartition = partition{
 				}: endpoint{
 					Hostname: "internetmonitor-fips.ca-central-1.amazonaws.com",
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{
+					Hostname: "internetmonitor.ca-west-1.api.aws",
+				},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{
@@ -16698,6 +17198,11 @@ var awsPartition = partition{
 				}: endpoint{
 					Hostname: "kendra-ranking-fips.ca-central-1.api.aws",
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{
+					Hostname: "kendra-ranking.ca-west-1.api.aws",
+				},
 				endpointKey{
 					Region: "eu-central-2",
 				}: endpoint{
@@ -16826,6 +17331,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -17303,6 +17811,24 @@ var awsPartition = partition{
 					},
 					Deprecated: boxedTrue,
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "kms-fips.ca-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ca-west-1-fips",
+				}: endpoint{
+					Hostname: "kms-fips.ca-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -17851,6 +18377,15 @@ var awsPartition = partition{
 				}: endpoint{
 					Hostname: "lambda.ca-central-1.api.aws",
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "lambda.ca-west-1.api.aws",
+				},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -18243,6 +18778,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -18585,6 +19123,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -18766,12 +19307,18 @@ var awsPartition = partition{
 		},
 		"m2": service{
 			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "af-south-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "ap-northeast-1",
 				}: endpoint{},
 				endpointKey{
 					Region: "ap-northeast-2",
 				}: endpoint{},
+				endpointKey{
+					Region: "ap-northeast-3",
+				}: endpoint{},
 				endpointKey{
 					Region: "ap-south-1",
 				}: endpoint{},
@@ -18791,6 +19338,12 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "eu-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-west-1",
 				}: endpoint{},
@@ -19105,12 +19658,18 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ap-south-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
 				endpointKey{
 					Region: "ap-southeast-1",
 				}: endpoint{},
 				endpointKey{
 					Region: "ap-southeast-2",
 				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
@@ -19129,6 +19688,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "eu-west-3",
 				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "sa-east-1",
 				}: endpoint{},
@@ -19393,6 +19955,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ap-southeast-2",
 				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
@@ -19448,6 +20013,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ap-southeast-2",
 				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
@@ -19503,6 +20071,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ap-southeast-2",
 				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
@@ -19860,6 +20431,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -20293,6 +20867,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -20501,6 +21078,9 @@ var awsPartition = partition{
 					},
 					Deprecated: boxedTrue,
 				},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "me-central-1",
 				}: endpoint{},
@@ -20979,6 +21559,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -21175,6 +21758,14 @@ var awsPartition = partition{
 						Region: "il-central-1",
 					},
 				},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{
+					Hostname: "oidc.me-central-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "me-central-1",
+					},
+				},
 				endpointKey{
 					Region: "me-south-1",
 				}: endpoint{
@@ -21766,6 +22357,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -22571,6 +23165,11 @@ var awsPartition = partition{
 				}: endpoint{
 					Hostname: "qbusiness.ca-central-1.api.aws",
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{
+					Hostname: "qbusiness.ca-west-1.api.aws",
+				},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{
@@ -22843,6 +23442,15 @@ var awsPartition = partition{
 				}: endpoint{
 					Hostname: "ram-fips.ca-central-1.amazonaws.com",
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ram-fips.ca-west-1.amazonaws.com",
+				},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -22876,6 +23484,15 @@ var awsPartition = partition{
 					},
 					Deprecated: boxedTrue,
 				},
+				endpointKey{
+					Region: "fips-ca-west-1",
+				}: endpoint{
+					Hostname: "ram-fips.ca-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "fips-us-east-1",
 				}: endpoint{
@@ -23006,6 +23623,15 @@ var awsPartition = partition{
 				}: endpoint{
 					Hostname: "rbin-fips.ca-central-1.amazonaws.com",
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "rbin-fips.ca-west-1.amazonaws.com",
+				},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -23039,6 +23665,15 @@ var awsPartition = partition{
 					},
 					Deprecated: boxedTrue,
 				},
+				endpointKey{
+					Region: "fips-ca-west-1",
+				}: endpoint{
+					Hostname: "rbin-fips.ca-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "fips-us-east-1",
 				}: endpoint{
@@ -23178,6 +23813,24 @@ var awsPartition = partition{
 					},
 					Deprecated: boxedTrue,
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "rds-fips.ca-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region: "ca-west-1-fips",
+				}: endpoint{
+					Hostname: "rds-fips.ca-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -23220,6 +23873,15 @@ var awsPartition = partition{
 					},
 					Deprecated: boxedTrue,
 				},
+				endpointKey{
+					Region: "rds-fips.ca-west-1",
+				}: endpoint{
+					Hostname: "rds-fips.ca-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "rds-fips.us-east-1",
 				}: endpoint{
@@ -23274,6 +23936,24 @@ var awsPartition = partition{
 					},
 					Deprecated: boxedTrue,
 				},
+				endpointKey{
+					Region: "rds.ca-west-1",
+				}: endpoint{
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region:  "rds.ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "rds-fips.ca-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "rds.us-east-1",
 				}: endpoint{
@@ -23576,6 +24256,15 @@ var awsPartition = partition{
 				}: endpoint{
 					Hostname: "redshift-fips.ca-central-1.amazonaws.com",
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "redshift-fips.ca-west-1.amazonaws.com",
+				},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -23609,6 +24298,15 @@ var awsPartition = partition{
 					},
 					Deprecated: boxedTrue,
 				},
+				endpointKey{
+					Region: "fips-ca-west-1",
+				}: endpoint{
+					Hostname: "redshift-fips.ca-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "fips-us-east-1",
 				}: endpoint{
@@ -24252,6 +24950,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -24407,6 +25108,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ap-south-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ap-south-2",
+				}: endpoint{},
 				endpointKey{
 					Region: "ap-southeast-1",
 				}: endpoint{},
@@ -24416,18 +25120,27 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ap-southeast-3",
 				}: endpoint{},
+				endpointKey{
+					Region: "ap-southeast-4",
+				}: endpoint{},
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "eu-central-2",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-north-1",
 				}: endpoint{},
 				endpointKey{
 					Region: "eu-south-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "eu-south-2",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-west-1",
 				}: endpoint{},
@@ -24437,6 +25150,48 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "eu-west-3",
 				}: endpoint{},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "rolesanywhere-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "rolesanywhere-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "rolesanywhere-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "rolesanywhere-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "il-central-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "me-central-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "me-south-1",
 				}: endpoint{},
@@ -24446,15 +25201,39 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "us-east-1",
 				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "rolesanywhere-fips.us-east-1.amazonaws.com",
+				},
 				endpointKey{
 					Region: "us-east-2",
 				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "rolesanywhere-fips.us-east-2.amazonaws.com",
+				},
 				endpointKey{
 					Region: "us-west-1",
 				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "rolesanywhere-fips.us-west-1.amazonaws.com",
+				},
 				endpointKey{
 					Region: "us-west-2",
 				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "rolesanywhere-fips.us-west-2.amazonaws.com",
+				},
 			},
 		},
 		"route53": service{
@@ -24551,6 +25330,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -24791,6 +25573,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -25068,6 +25853,27 @@ var awsPartition = partition{
 				}: endpoint{
 					Hostname: "s3-fips.dualstack.ca-central-1.amazonaws.com",
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{
+					Hostname: "s3.dualstack.ca-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "s3-fips.ca-west-1.amazonaws.com",
+				},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-fips.dualstack.ca-west-1.amazonaws.com",
+				},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -25153,6 +25959,15 @@ var awsPartition = partition{
 					},
 					Deprecated: boxedTrue,
 				},
+				endpointKey{
+					Region: "fips-ca-west-1",
+				}: endpoint{
+					Hostname: "s3-fips.ca-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "fips-us-east-1",
 				}: endpoint{
@@ -26397,6 +27212,27 @@ var awsPartition = partition{
 
 					Deprecated: boxedTrue,
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1-fips",
+				}: endpoint{
+
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -26605,6 +27441,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -26753,21 +27592,81 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "eu-west-3",
 				}: endpoint{},
+				endpointKey{
+					Region: "fips-us-east-1",
+				}: endpoint{
+					Hostname: "securitylake-fips.us-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-east-2",
+				}: endpoint{
+					Hostname: "securitylake-fips.us-east-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-east-2",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-1",
+				}: endpoint{
+					Hostname: "securitylake-fips.us-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-west-2",
+				}: endpoint{
+					Hostname: "securitylake-fips.us-west-2.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-west-2",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "sa-east-1",
 				}: endpoint{},
 				endpointKey{
 					Region: "us-east-1",
 				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "securitylake-fips.us-east-1.amazonaws.com",
+				},
 				endpointKey{
 					Region: "us-east-2",
 				}: endpoint{},
+				endpointKey{
+					Region:  "us-east-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "securitylake-fips.us-east-2.amazonaws.com",
+				},
 				endpointKey{
 					Region: "us-west-1",
 				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "securitylake-fips.us-west-1.amazonaws.com",
+				},
 				endpointKey{
 					Region: "us-west-2",
 				}: endpoint{},
+				endpointKey{
+					Region:  "us-west-2",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "securitylake-fips.us-west-2.amazonaws.com",
+				},
 			},
 		},
 		"serverlessrepo": service{
@@ -27061,6 +27960,9 @@ var awsPartition = partition{
 				}: endpoint{
 					Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com",
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -27584,6 +28486,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -28552,6 +29457,15 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "sns-fips.ca-west-1.amazonaws.com",
+				},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -28576,6 +29490,15 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "eu-west-3",
 				}: endpoint{},
+				endpointKey{
+					Region: "fips-ca-west-1",
+				}: endpoint{
+					Hostname: "sns-fips.ca-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "fips-us-east-1",
 				}: endpoint{
@@ -28706,6 +29629,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -28863,6 +29789,15 @@ var awsPartition = partition{
 				}: endpoint{
 					Hostname: "ssm-fips.ca-central-1.amazonaws.com",
 				},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
+				endpointKey{
+					Region:  "ca-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ssm-fips.ca-west-1.amazonaws.com",
+				},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -28896,6 +29831,15 @@ var awsPartition = partition{
 					},
 					Deprecated: boxedTrue,
 				},
+				endpointKey{
+					Region: "fips-ca-west-1",
+				}: endpoint{
+					Hostname: "ssm-fips.ca-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "ca-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "fips-us-east-1",
 				}: endpoint{
@@ -29480,6 +30424,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -29799,6 +30746,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -29905,6 +30855,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -30079,6 +31032,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -30227,6 +31183,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -30375,6 +31334,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -33301,6 +34263,9 @@ var awsPartition = partition{
 				endpointKey{
 					Region: "ca-central-1",
 				}: endpoint{},
+				endpointKey{
+					Region: "ca-west-1",
+				}: endpoint{},
 				endpointKey{
 					Region: "eu-central-1",
 				}: endpoint{},
@@ -34116,6 +35081,31 @@ var awscnPartition = partition{
 				}: endpoint{},
 			},
 		},
+		"eks-auth": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					DNSSuffix: "api.amazonwebservices.com.cn",
+				},
+				defaultKey{
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname:  "{service}-fips.{region}.{dnsSuffix}",
+					DNSSuffix: "api.amazonwebservices.com.cn",
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{
+					Hostname: "eks-auth.cn-north-1.api.amazonwebservices.com.cn",
+				},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{
+					Hostname: "eks-auth.cn-northwest-1.api.amazonwebservices.com.cn",
+				},
+			},
+		},
 		"elasticache": service{
 			Endpoints: serviceEndpoints{
 				endpointKey{
@@ -34503,6 +35493,29 @@ var awscnPartition = partition{
 				}: endpoint{},
 			},
 		},
+		"iottwinmaker": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "api-cn-north-1",
+				}: endpoint{
+					Hostname: "api.iottwinmaker.cn-north-1.amazonaws.com.cn",
+					CredentialScope: credentialScope{
+						Region: "cn-north-1",
+					},
+				},
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "data-cn-north-1",
+				}: endpoint{
+					Hostname: "data.iottwinmaker.cn-north-1.amazonaws.com.cn",
+					CredentialScope: credentialScope{
+						Region: "cn-north-1",
+					},
+				},
+			},
+		},
 		"kafka": service{
 			Endpoints: serviceEndpoints{
 				endpointKey{
@@ -34775,6 +35788,16 @@ var awscnPartition = partition{
 				}: endpoint{},
 			},
 		},
+		"pipes": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "cn-north-1",
+				}: endpoint{},
+				endpointKey{
+					Region: "cn-northwest-1",
+				}: endpoint{},
+			},
+		},
 		"polly": service{
 			Endpoints: serviceEndpoints{
 				endpointKey{
@@ -36455,6 +37478,13 @@ var awsusgovPartition = partition{
 				},
 			},
 		},
+		"bedrock": service{
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{},
+			},
+		},
 		"cassandra": service{
 			Endpoints: serviceEndpoints{
 				endpointKey{
@@ -37667,6 +38697,31 @@ var awsusgovPartition = partition{
 				},
 			},
 		},
+		"eks-auth": service{
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					DNSSuffix: "api.aws",
+				},
+				defaultKey{
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname:  "{service}-fips.{region}.{dnsSuffix}",
+					DNSSuffix: "api.aws",
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-gov-east-1",
+				}: endpoint{
+					Hostname: "eks-auth.us-gov-east-1.api.aws",
+				},
+				endpointKey{
+					Region: "us-gov-west-1",
+				}: endpoint{
+					Hostname: "eks-auth.us-gov-west-1.api.aws",
+				},
+			},
+		},
 		"elasticache": service{
 			Defaults: endpointDefaults{
 				defaultKey{}: endpoint{},
@@ -40364,12 +41419,42 @@ var awsusgovPartition = partition{
 		},
 		"rolesanywhere": service{
 			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips-us-gov-east-1",
+				}: endpoint{
+					Hostname: "rolesanywhere-fips.us-gov-east-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-gov-west-1",
+				}: endpoint{
+					Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com",
+					CredentialScope: credentialScope{
+						Region: "us-gov-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "us-gov-east-1",
 				}: endpoint{},
+				endpointKey{
+					Region:  "us-gov-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "rolesanywhere-fips.us-gov-east-1.amazonaws.com",
+				},
 				endpointKey{
 					Region: "us-gov-west-1",
 				}: endpoint{},
+				endpointKey{
+					Region:  "us-gov-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com",
+				},
 			},
 		},
 		"route53": service{
@@ -42644,6 +43729,19 @@ var awsisoPartition = partition{
 				}: endpoint{},
 			},
 		},
+		"guardduty": service{
+			IsRegionalized: boxedTrue,
+			Defaults: endpointDefaults{
+				defaultKey{}: endpoint{
+					Protocols: []string{"https"},
+				},
+			},
+			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "us-iso-east-1",
+				}: endpoint{},
+			},
+		},
 		"health": service{
 			Endpoints: serviceEndpoints{
 				endpointKey{
@@ -42794,12 +43892,42 @@ var awsisoPartition = partition{
 		},
 		"ram": service{
 			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips-us-iso-east-1",
+				}: endpoint{
+					Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov",
+					CredentialScope: credentialScope{
+						Region: "us-iso-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-iso-west-1",
+				}: endpoint{
+					Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov",
+					CredentialScope: credentialScope{
+						Region: "us-iso-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "us-iso-east-1",
 				}: endpoint{},
+				endpointKey{
+					Region:  "us-iso-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov",
+				},
 				endpointKey{
 					Region: "us-iso-west-1",
 				}: endpoint{},
+				endpointKey{
+					Region:  "us-iso-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov",
+				},
 			},
 		},
 		"rbin": service{
@@ -43024,15 +44152,61 @@ var awsisoPartition = partition{
 				},
 			},
 			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips-us-iso-east-1",
+				}: endpoint{
+					Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov",
+					CredentialScope: credentialScope{
+						Region: "us-iso-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
+				endpointKey{
+					Region: "fips-us-iso-west-1",
+				}: endpoint{
+					Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov",
+					CredentialScope: credentialScope{
+						Region: "us-iso-west-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "us-iso-east-1",
 				}: endpoint{
 					Protocols:         []string{"http", "https"},
 					SignatureVersions: []string{"s3v4"},
 				},
+				endpointKey{
+					Region:  "us-iso-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname:          "s3-fips.us-iso-east-1.c2s.ic.gov",
+					Protocols:         []string{"http", "https"},
+					SignatureVersions: []string{"s3v4"},
+				},
+				endpointKey{
+					Region:  "us-iso-east-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname:          "s3-fips.dualstack.us-iso-east-1.c2s.ic.gov",
+					Protocols:         []string{"http", "https"},
+					SignatureVersions: []string{"s3v4"},
+				},
 				endpointKey{
 					Region: "us-iso-west-1",
 				}: endpoint{},
+				endpointKey{
+					Region:  "us-iso-west-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov",
+				},
+				endpointKey{
+					Region:  "us-iso-west-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-fips.dualstack.us-iso-west-1.c2s.ic.gov",
+				},
 			},
 		},
 		"secretsmanager": service{
@@ -43664,9 +44838,24 @@ var awsisobPartition = partition{
 		},
 		"ram": service{
 			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips-us-isob-east-1",
+				}: endpoint{
+					Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov",
+					CredentialScope: credentialScope{
+						Region: "us-isob-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "us-isob-east-1",
 				}: endpoint{},
+				endpointKey{
+					Region:  "us-isob-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov",
+				},
 			},
 		},
 		"rbin": service{
@@ -43805,9 +44994,30 @@ var awsisobPartition = partition{
 				},
 			},
 			Endpoints: serviceEndpoints{
+				endpointKey{
+					Region: "fips-us-isob-east-1",
+				}: endpoint{
+					Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov",
+					CredentialScope: credentialScope{
+						Region: "us-isob-east-1",
+					},
+					Deprecated: boxedTrue,
+				},
 				endpointKey{
 					Region: "us-isob-east-1",
 				}: endpoint{},
+				endpointKey{
+					Region:  "us-isob-east-1",
+					Variant: fipsVariant,
+				}: endpoint{
+					Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov",
+				},
+				endpointKey{
+					Region:  "us-isob-east-1",
+					Variant: fipsVariant | dualStackVariant,
+				}: endpoint{
+					Hostname: "s3-fips.dualstack.us-isob-east-1.sc2s.sgov.gov",
+				},
 			},
 		},
 		"secretsmanager": service{
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
index 9f8577b115..8f8370e8ad 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.49.1"
+const SDKVersion = "1.49.22"
diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md
index a8c29bfbd5..8969526a6e 100644
--- a/vendor/github.com/go-logr/logr/README.md
+++ b/vendor/github.com/go-logr/logr/README.md
@@ -91,11 +91,12 @@ logr design but also left out some parts and changed others:
 | Adding a name to a logger | `WithName` | no API |
 | Modify verbosity of log entries in a call chain | `V` | no API |
 | Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
+| Pass context for extracting additional values | no API | API variants like `InfoCtx` |
 
 The high-level slog API is explicitly meant to be one of many different APIs
 that can be layered on top of a shared `slog.Handler`. logr is one such
-alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr)
-package.
+alternative API, with [interoperability](#slog-interoperability) provided by
+some conversion functions.
 
 ### Inspiration
 
@@ -145,24 +146,24 @@ There are implementations for the following logging libraries:
 ## slog interoperability
 
 Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
-and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and
-`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`.
+and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and
+`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`.
 As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
-slog API. `slogr` itself leaves that to the caller.
+slog API.
 
-## Using a `logr.Sink` as backend for slog
+### Using a `logr.LogSink` as backend for slog
 
 Ideally, a logr sink implementation should support both logr and slog by
-implementing both the normal logr interface(s) and `slogr.SlogSink`.  Because
+implementing both the normal logr interface(s) and `SlogSink`.  Because
 of a conflict in the parameters of the common `Enabled` method, it is [not
 possible to implement both slog.Handler and logr.Sink in the same
 type](https://github.com/golang/go/issues/59110).
 
 If both are supported, log calls can go from the high-level APIs to the backend
-without the need to convert parameters. `NewLogr` and `NewSlogHandler` can
+without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can
 convert back and forth without adding additional wrappers, with one exception:
 when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
-`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future
+`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future
 log calls.
 
 Such an implementation should also support values that implement specific
@@ -187,13 +188,13 @@ Not supporting slog has several drawbacks:
 These drawbacks are severe enough that applications using a mixture of slog and
 logr should switch to a different backend.
 
-## Using a `slog.Handler` as backend for logr
+### Using a `slog.Handler` as backend for logr
 
 Using a plain `slog.Handler` without support for logr works better than the
 other direction:
 - All logr verbosity levels can be mapped 1:1 to their corresponding slog level
   by negating them.
-- Stack unwinding is done by the `slogr.SlogSink` and the resulting program
+- Stack unwinding is done by the `SlogSink` and the resulting program
   counter is passed to the `slog.Handler`.
 - Names added via `Logger.WithName` are gathered and recorded in an additional
   attribute with `logger` as key and the names separated by slash as value.
@@ -205,27 +206,39 @@ ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility
 with logr implementations without slog support is not important, then
 `slog.Valuer` is sufficient.
 
-## Context support for slog
+### Context support for slog
 
 Storing a logger in a `context.Context` is not supported by
-slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this
-to fill this gap:
+slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be
+used to fill this gap. They store and retrieve a `slog.Logger` pointer
+under the same context key that is also used by `NewContext` and
+`FromContext` for `logr.Logger` value.
 
-    func HandlerFromContext(ctx context.Context) slog.Handler {
-        logger, err := logr.FromContext(ctx)
-        if err == nil {
-            return slogr.NewSlogHandler(logger)
-        }
-        return slog.Default().Handler()
-    }
+When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will
+automatically convert the `slog.Logger` to a
+`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction.
 
-    func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context {
-        return logr.NewContext(ctx, slogr.NewLogr(handler))
-    }
+With this approach, binaries which use either slog or logr are as efficient as
+possible with no unnecessary allocations. This is also why the API stores a
+`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger`
+on retrieval would need to allocate one.
 
-The downside is that storing and retrieving a `slog.Handler` needs more
-allocations compared to using a `logr.Logger`. Therefore the recommendation is
-to use the `logr.Logger` API in code which uses contextual logging.
+The downside is that switching back and forth needs more allocations. Because
+logr is the API that is already in use by different packages, in particular
+Kubernetes, the recommendation is to use the `logr.Logger` API in code which
+uses contextual logging.
+
+An alternative to adding values to a logger and storing that logger in the
+context is to store the values in the context and to configure a logging
+backend to extract those values when emitting log entries. This only works when
+log calls are passed the context, which is not supported by the logr API.
+
+With the slog API, it is possible, but not
+required. https://github.com/veqryn/slog-context is a package for slog which
+provides additional support code for this approach. It also contains wrappers
+for the context functions in logr, so developers who prefer to not use the logr
+APIs directly can use those instead and the resulting code will still be
+interoperable with logr.
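+
+As a rough sketch of the idea (not the API of that package), a wrapping
+`slog.Handler` can pull values out of the context passed to `Handle`; the
+`userKey` type below is purely illustrative:
+
+    type userKey struct{} // illustrative context key
+
+    type ctxHandler struct{ slog.Handler }
+
+    func (h ctxHandler) Handle(ctx context.Context, r slog.Record) error {
+        if user, ok := ctx.Value(userKey{}).(string); ok {
+            r = r.Clone() // records share state, so modify a copy
+            r.AddAttrs(slog.String("user", user))
+        }
+        return h.Handler.Handle(ctx, r)
+    }
+
+Log calls then have to pass the context, for example via
+`slog.Logger.InfoContext`, so that the handler can see it.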
 
 ## FAQ
 
diff --git a/vendor/github.com/go-logr/logr/context.go b/vendor/github.com/go-logr/logr/context.go
new file mode 100644
index 0000000000..de8bcc3ad8
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/context.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+// contextKey is how we find Loggers in a context.Context. With Go < 1.21,
+// the value is always a Logger value. With Go >= 1.21, the value can be a
+// Logger value or a slog.Logger pointer.
+type contextKey struct{}
+
+// notFoundError exists to carry an IsNotFound method.
+type notFoundError struct{}
+
+func (notFoundError) Error() string {
+	return "no logr.Logger was present"
+}
+
+func (notFoundError) IsNotFound() bool {
+	return true
+}
diff --git a/vendor/github.com/go-logr/logr/context_noslog.go b/vendor/github.com/go-logr/logr/context_noslog.go
new file mode 100644
index 0000000000..f012f9a18e
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/context_noslog.go
@@ -0,0 +1,49 @@
+//go:build !go1.21
+// +build !go1.21
+
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+	"context"
+)
+
+// FromContext returns a Logger from ctx or an error if no Logger is found.
+func FromContext(ctx context.Context) (Logger, error) {
+	if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+		return v, nil
+	}
+
+	return Logger{}, notFoundError{}
+}
+
+// FromContextOrDiscard returns a Logger from ctx.  If no Logger is found, this
+// returns a Logger that discards all log messages.
+func FromContextOrDiscard(ctx context.Context) Logger {
+	if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+		return v
+	}
+
+	return Discard()
+}
+
+// NewContext returns a new Context, derived from ctx, which carries the
+// provided Logger.
+func NewContext(ctx context.Context, logger Logger) context.Context {
+	return context.WithValue(ctx, contextKey{}, logger)
+}
diff --git a/vendor/github.com/go-logr/logr/context_slog.go b/vendor/github.com/go-logr/logr/context_slog.go
new file mode 100644
index 0000000000..065ef0b828
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/context_slog.go
@@ -0,0 +1,83 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+	"context"
+	"fmt"
+	"log/slog"
+)
+
+// FromContext returns a Logger from ctx or an error if no Logger is found.
+func FromContext(ctx context.Context) (Logger, error) {
+	v := ctx.Value(contextKey{})
+	if v == nil {
+		return Logger{}, notFoundError{}
+	}
+
+	switch v := v.(type) {
+	case Logger:
+		return v, nil
+	case *slog.Logger:
+		return FromSlogHandler(v.Handler()), nil
+	default:
+		// Not reached.
+		panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
+	}
+}
+
+// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found.
+func FromContextAsSlogLogger(ctx context.Context) *slog.Logger {
+	v := ctx.Value(contextKey{})
+	if v == nil {
+		return nil
+	}
+
+	switch v := v.(type) {
+	case Logger:
+		return slog.New(ToSlogHandler(v))
+	case *slog.Logger:
+		return v
+	default:
+		// Not reached.
+		panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
+	}
+}
+
+// FromContextOrDiscard returns a Logger from ctx.  If no Logger is found, this
+// returns a Logger that discards all log messages.
+func FromContextOrDiscard(ctx context.Context) Logger {
+	if logger, err := FromContext(ctx); err == nil {
+		return logger
+	}
+	return Discard()
+}
+
+// NewContext returns a new Context, derived from ctx, which carries the
+// provided Logger.
+func NewContext(ctx context.Context, logger Logger) context.Context {
+	return context.WithValue(ctx, contextKey{}, logger)
+}
+
+// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the
+// provided slog.Logger.
+func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context {
+	return context.WithValue(ctx, contextKey{}, logger)
+}
diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go
index 12e5807cc5..fb2f866f4b 100644
--- a/vendor/github.com/go-logr/logr/funcr/funcr.go
+++ b/vendor/github.com/go-logr/logr/funcr/funcr.go
@@ -100,6 +100,11 @@ type Options struct {
 	// details, see docs for Go's time.Layout.
 	TimestampFormat string
 
+	// LogInfoLevel tells funcr what key to use to log the info level.
+	// If not specified, the info level will be logged as "level".
+	// If this is set to "", the info level will not be logged at all.
+	LogInfoLevel *string
+
 	// Verbosity tells funcr which V logs to produce.  Higher values enable
 	// more logs.  Info logs at or below this level will be written, while logs
 	// above this level will be discarded.
@@ -213,6 +218,10 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
 	if opts.MaxLogDepth == 0 {
 		opts.MaxLogDepth = defaultMaxLogDepth
 	}
+	if opts.LogInfoLevel == nil {
+		opts.LogInfoLevel = new(string)
+		*opts.LogInfoLevel = "level"
+	}
 	f := Formatter{
 		outputFormat: outfmt,
 		prefix:       "",
@@ -227,12 +236,15 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
 // implementation. It should be constructed with NewFormatter. Some of
 // its methods directly implement logr.LogSink.
 type Formatter struct {
-	outputFormat outputFormat
-	prefix       string
-	values       []any
-	valuesStr    string
-	depth        int
-	opts         *Options
+	outputFormat    outputFormat
+	prefix          string
+	values          []any
+	valuesStr       string
+	parentValuesStr string
+	depth           int
+	opts            *Options
+	group           string // for slog groups
+	groupDepth      int
 }
 
 // outputFormat indicates which outputFormat to use.
@@ -253,33 +265,62 @@ func (f Formatter) render(builtins, args []any) string {
 	// Empirically bytes.Buffer is faster than strings.Builder for this.
 	buf := bytes.NewBuffer(make([]byte, 0, 1024))
 	if f.outputFormat == outputJSON {
-		buf.WriteByte('{')
+		buf.WriteByte('{') // for the whole line
 	}
+
 	vals := builtins
 	if hook := f.opts.RenderBuiltinsHook; hook != nil {
 		vals = hook(f.sanitize(vals))
 	}
 	f.flatten(buf, vals, false, false) // keys are ours, no need to escape
 	continuing := len(builtins) > 0
-	if len(f.valuesStr) > 0 {
+
+	if f.parentValuesStr != "" {
 		if continuing {
-			if f.outputFormat == outputJSON {
-				buf.WriteByte(',')
-			} else {
-				buf.WriteByte(' ')
-			}
+			buf.WriteByte(f.comma())
 		}
+		buf.WriteString(f.parentValuesStr)
 		continuing = true
-		buf.WriteString(f.valuesStr)
 	}
+
+	groupDepth := f.groupDepth
+	if f.group != "" {
+		if f.valuesStr != "" || len(args) != 0 {
+			if continuing {
+				buf.WriteByte(f.comma())
+			}
+			buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
+			buf.WriteByte(f.colon())
+			buf.WriteByte('{') // for the group
+			continuing = false
+		} else {
+			// The group was empty
+			groupDepth--
+		}
+	}
+
+	if f.valuesStr != "" {
+		if continuing {
+			buf.WriteByte(f.comma())
+		}
+		buf.WriteString(f.valuesStr)
+		continuing = true
+	}
+
 	vals = args
 	if hook := f.opts.RenderArgsHook; hook != nil {
 		vals = hook(f.sanitize(vals))
 	}
 	f.flatten(buf, vals, continuing, true) // escape user-provided keys
-	if f.outputFormat == outputJSON {
-		buf.WriteByte('}')
+
+	for i := 0; i < groupDepth; i++ {
+		buf.WriteByte('}') // for the groups
 	}
+
+	if f.outputFormat == outputJSON {
+		buf.WriteByte('}') // for the whole line
+	}
+
 	return buf.String()
 }
 
@@ -298,9 +339,16 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
 	if len(kvList)%2 != 0 {
 		kvList = append(kvList, noValue)
 	}
+	copied := false
 	for i := 0; i < len(kvList); i += 2 {
 		k, ok := kvList[i].(string)
 		if !ok {
+			if !copied {
+				newList := make([]any, len(kvList))
+				copy(newList, kvList)
+				kvList = newList
+				copied = true
+			}
 			k = f.nonStringKey(kvList[i])
 			kvList[i] = k
 		}
@@ -308,7 +356,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
 
 		if i > 0 || continuing {
 			if f.outputFormat == outputJSON {
-				buf.WriteByte(',')
+				buf.WriteByte(f.comma())
 			} else {
 				// In theory the format could be something we don't understand.  In
 				// practice, we control it, so it won't be.
@@ -316,24 +364,35 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
 			}
 		}
 
-		if escapeKeys {
-			buf.WriteString(prettyString(k))
-		} else {
-			// this is faster
-			buf.WriteByte('"')
-			buf.WriteString(k)
-			buf.WriteByte('"')
-		}
-		if f.outputFormat == outputJSON {
-			buf.WriteByte(':')
-		} else {
-			buf.WriteByte('=')
-		}
+		buf.WriteString(f.quoted(k, escapeKeys))
+		buf.WriteByte(f.colon())
 		buf.WriteString(f.pretty(v))
 	}
 	return kvList
 }
 
+func (f Formatter) quoted(str string, escape bool) string {
+	if escape {
+		return prettyString(str)
+	}
+	// this is faster
+	return `"` + str + `"`
+}
+
+func (f Formatter) comma() byte {
+	if f.outputFormat == outputJSON {
+		return ','
+	}
+	return ' '
+}
+
+func (f Formatter) colon() byte {
+	if f.outputFormat == outputJSON {
+		return ':'
+	}
+	return '='
+}
+
 func (f Formatter) pretty(value any) string {
 	return f.prettyWithFlags(value, 0, 0)
 }
@@ -407,12 +466,12 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
 		}
 		for i := 0; i < len(v); i += 2 {
 			if i > 0 {
-				buf.WriteByte(',')
+				buf.WriteByte(f.comma())
 			}
 			k, _ := v[i].(string) // sanitize() above means no need to check success
 			// arbitrary keys might need escaping
 			buf.WriteString(prettyString(k))
-			buf.WriteByte(':')
+			buf.WriteByte(f.colon())
 			buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
 		}
 		if flags&flagRawStruct == 0 {
@@ -481,7 +540,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
 				continue
 			}
 			if printComma {
-				buf.WriteByte(',')
+				buf.WriteByte(f.comma())
 			}
 			printComma = true // if we got here, we are rendering a field
 			if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
@@ -492,10 +551,8 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
 				name = fld.Name
 			}
 			// field names can't contain characters which need escaping
-			buf.WriteByte('"')
-			buf.WriteString(name)
-			buf.WriteByte('"')
-			buf.WriteByte(':')
+			buf.WriteString(f.quoted(name, false))
+			buf.WriteByte(f.colon())
 			buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
 		}
 		if flags&flagRawStruct == 0 {
@@ -520,7 +577,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
 		buf.WriteByte('[')
 		for i := 0; i < v.Len(); i++ {
 			if i > 0 {
-				buf.WriteByte(',')
+				buf.WriteByte(f.comma())
 			}
 			e := v.Index(i)
 			buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
@@ -534,7 +591,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
 		i := 0
 		for it.Next() {
 			if i > 0 {
-				buf.WriteByte(',')
+				buf.WriteByte(f.comma())
 			}
 			// If a map key supports TextMarshaler, use it.
 			keystr := ""
@@ -556,7 +613,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
 				}
 			}
 			buf.WriteString(keystr)
-			buf.WriteByte(':')
+			buf.WriteByte(f.colon())
 			buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
 			i++
 		}
@@ -706,6 +763,53 @@ func (f Formatter) sanitize(kvList []any) []any {
 	return kvList
 }
 
+// startGroup opens a new group scope (basically a sub-struct), which locks all
+// the current saved values and starts them anew.  This is needed to satisfy
+// slog.
+func (f *Formatter) startGroup(group string) {
+	// Unnamed groups are just inlined.
+	if group == "" {
+		return
+	}
+
+	// Any saved values can no longer be changed.
+	buf := bytes.NewBuffer(make([]byte, 0, 1024))
+	continuing := false
+
+	if f.parentValuesStr != "" {
+		buf.WriteString(f.parentValuesStr)
+		continuing = true
+	}
+
+	if f.group != "" && f.valuesStr != "" {
+		if continuing {
+			buf.WriteByte(f.comma())
+		}
+		buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
+		buf.WriteByte(f.colon())
+		buf.WriteByte('{') // for the group
+		continuing = false
+	}
+
+	if f.valuesStr != "" {
+		if continuing {
+			buf.WriteByte(f.comma())
+		}
+		buf.WriteString(f.valuesStr)
+	}
+
+	// NOTE: We don't close the scope here - that's done later, when a log line
+	// is actually rendered (because we have N scopes to close).
+
+	f.parentValuesStr = buf.String()
+
+	// Start collecting new values.
+	f.group = group
+	f.groupDepth++
+	f.valuesStr = ""
+	f.values = nil
+}
+
 // Init configures this Formatter from runtime info, such as the call depth
 // imposed by logr itself.
 // Note that this receiver is a pointer, so depth can be saved.
@@ -740,7 +844,10 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, args
 	if policy := f.opts.LogCaller; policy == All || policy == Info {
 		args = append(args, "caller", f.caller())
 	}
-	args = append(args, "level", level, "msg", msg)
+	if key := *f.opts.LogInfoLevel; key != "" {
+		args = append(args, key, level)
+	}
+	args = append(args, "msg", msg)
 	return prefix, f.render(args, kvList)
 }
 
diff --git a/vendor/github.com/go-logr/logr/funcr/slogsink.go b/vendor/github.com/go-logr/logr/funcr/slogsink.go
new file mode 100644
index 0000000000..7bd84761e2
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/funcr/slogsink.go
@@ -0,0 +1,105 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package funcr
+
+import (
+	"context"
+	"log/slog"
+
+	"github.com/go-logr/logr"
+)
+
+var _ logr.SlogSink = &fnlogger{}
+
+const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink
+
+func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
+	kvList := make([]any, 0, 2*record.NumAttrs())
+	record.Attrs(func(attr slog.Attr) bool {
+		kvList = attrToKVs(attr, kvList)
+		return true
+	})
+
+	if record.Level >= slog.LevelError {
+		l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
+	} else {
+		level := l.levelFromSlog(record.Level)
+		l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
+	}
+	return nil
+}
+
+func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
+	kvList := make([]any, 0, 2*len(attrs))
+	for _, attr := range attrs {
+		kvList = attrToKVs(attr, kvList)
+	}
+	l.AddValues(kvList)
+	return &l
+}
+
+func (l fnlogger) WithGroup(name string) logr.SlogSink {
+	l.startGroup(name)
+	return &l
+}
+
+// attrToKVs appends a slog.Attr to a logr-style kvList.  It handles slog Groups
+// and other details of slog.
+func attrToKVs(attr slog.Attr, kvList []any) []any {
+	attrVal := attr.Value.Resolve()
+	if attrVal.Kind() == slog.KindGroup {
+		groupVal := attrVal.Group()
+		grpKVs := make([]any, 0, 2*len(groupVal))
+		for _, attr := range groupVal {
+			grpKVs = attrToKVs(attr, grpKVs)
+		}
+		if attr.Key == "" {
+			// slog says we have to inline these
+			kvList = append(kvList, grpKVs...)
+		} else {
+			kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
+		}
+	} else if attr.Key != "" {
+		kvList = append(kvList, attr.Key, attrVal.Any())
+	}
+
+	return kvList
+}
+
+// levelFromSlog adjusts the level by the logger's verbosity and negates it.
+// It ensures that the result is >= 0. This is necessary because the result is
+// passed to a LogSink and that API did not historically document whether
+// levels could be negative or what that meant.
+//
+// Some example usage:
+//
+//	logrV0 := getMyLogger()
+//	logrV2 := logrV0.V(2)
+//	slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+//	slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
+//	slogV2.Info("msg")  // =~  logrV2.V(0) =~ logrV0.V(2)
+//	slogV2.Warn("msg")  // =~ logrV2.V(-4) =~ logrV0.V(0)
+func (l fnlogger) levelFromSlog(level slog.Level) int {
+	result := -level
+	if result < 0 {
+		result = 0 // because LogSink doesn't expect negative V levels
+	}
+	return int(result)
+}
diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go
index 2a5075a180..b4428e105b 100644
--- a/vendor/github.com/go-logr/logr/logr.go
+++ b/vendor/github.com/go-logr/logr/logr.go
@@ -207,10 +207,6 @@ limitations under the License.
 // those.
 package logr
 
-import (
-	"context"
-)
-
 // New returns a new Logger instance.  This is primarily used by libraries
 // implementing LogSink, rather than end users.  Passing a nil sink will create
 // a Logger which discards all log lines.
@@ -410,45 +406,6 @@ func (l Logger) IsZero() bool {
 	return l.sink == nil
 }
 
-// contextKey is how we find Loggers in a context.Context.
-type contextKey struct{}
-
-// FromContext returns a Logger from ctx or an error if no Logger is found.
-func FromContext(ctx context.Context) (Logger, error) {
-	if v, ok := ctx.Value(contextKey{}).(Logger); ok {
-		return v, nil
-	}
-
-	return Logger{}, notFoundError{}
-}
-
-// notFoundError exists to carry an IsNotFound method.
-type notFoundError struct{}
-
-func (notFoundError) Error() string {
-	return "no logr.Logger was present"
-}
-
-func (notFoundError) IsNotFound() bool {
-	return true
-}
-
-// FromContextOrDiscard returns a Logger from ctx.  If no Logger is found, this
-// returns a Logger that discards all log messages.
-func FromContextOrDiscard(ctx context.Context) Logger {
-	if v, ok := ctx.Value(contextKey{}).(Logger); ok {
-		return v
-	}
-
-	return Discard()
-}
-
-// NewContext returns a new Context, derived from ctx, which carries the
-// provided Logger.
-func NewContext(ctx context.Context, logger Logger) context.Context {
-	return context.WithValue(ctx, contextKey{}, logger)
-}
-
 // RuntimeInfo holds information that the logr "core" library knows which
 // LogSinks might want to know.
 type RuntimeInfo struct {
diff --git a/vendor/github.com/go-logr/logr/sloghandler.go b/vendor/github.com/go-logr/logr/sloghandler.go
new file mode 100644
index 0000000000..82d1ba4948
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/sloghandler.go
@@ -0,0 +1,192 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+	"context"
+	"log/slog"
+)
+
+type slogHandler struct {
+	// May be nil, in which case all logs get discarded.
+	sink LogSink
+	// Non-nil if sink is non-nil and implements SlogSink.
+	slogSink SlogSink
+
+	// groupPrefix collects values from WithGroup calls. It gets added as
+	// prefix to value keys when handling a log record.
+	groupPrefix string
+
+	// levelBias can be set when constructing the handler to influence the
+	// slog.Level of log records. A positive levelBias reduces the
+	// slog.Level value. slog has no API to influence this value after the
+	// handler got created, so it can only be set indirectly through
+	// Logger.V.
+	levelBias slog.Level
+}
+
+var _ slog.Handler = &slogHandler{}
+
+// groupSeparator is used to concatenate WithGroup names and attribute keys.
+const groupSeparator = "."
+
+// GetLevel is used for black box unit testing.
+func (l *slogHandler) GetLevel() slog.Level {
+	return l.levelBias
+}
+
+func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool {
+	return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level)))
+}
+
+func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
+	if l.slogSink != nil {
+		// Only adjust verbosity level of log entries < slog.LevelError.
+		if record.Level < slog.LevelError {
+			record.Level -= l.levelBias
+		}
+		return l.slogSink.Handle(ctx, record)
+	}
+
+	// No need to check for nil sink here because Handle will only be called
+	// when Enabled returned true.
+
+	kvList := make([]any, 0, 2*record.NumAttrs())
+	record.Attrs(func(attr slog.Attr) bool {
+		kvList = attrToKVs(attr, l.groupPrefix, kvList)
+		return true
+	})
+	if record.Level >= slog.LevelError {
+		l.sinkWithCallDepth().Error(nil, record.Message, kvList...)
+	} else {
+		level := l.levelFromSlog(record.Level)
+		l.sinkWithCallDepth().Info(level, record.Message, kvList...)
+	}
+	return nil
+}
+
+// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info
+// are called by Handle, code in slog gets skipped.
+//
+// This offset currently (Go 1.21.0) works for calls through
+// slog.New(ToSlogHandler(...)).  There's no guarantee that the call
+// chain won't change. Wrapping the handler will also break unwinding. It's
+// still better than not adjusting at all....
+//
+// This cannot be done when constructing the handler because FromSlogHandler needs
+// access to the original sink without this adjustment. A second copy would
+// work, but then WithAttrs would have to be called for both of them.
+func (l *slogHandler) sinkWithCallDepth() LogSink {
+	if sink, ok := l.sink.(CallDepthLogSink); ok {
+		return sink.WithCallDepth(2)
+	}
+	return l.sink
+}
+
+func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
+	if l.sink == nil || len(attrs) == 0 {
+		return l
+	}
+
+	clone := *l
+	if l.slogSink != nil {
+		clone.slogSink = l.slogSink.WithAttrs(attrs)
+		clone.sink = clone.slogSink
+	} else {
+		kvList := make([]any, 0, 2*len(attrs))
+		for _, attr := range attrs {
+			kvList = attrToKVs(attr, l.groupPrefix, kvList)
+		}
+		clone.sink = l.sink.WithValues(kvList...)
+	}
+	return &clone
+}
+
+func (l *slogHandler) WithGroup(name string) slog.Handler {
+	if l.sink == nil {
+		return l
+	}
+	if name == "" {
+		// slog says to inline empty groups
+		return l
+	}
+	clone := *l
+	if l.slogSink != nil {
+		clone.slogSink = l.slogSink.WithGroup(name)
+		clone.sink = clone.slogSink
+	} else {
+		clone.groupPrefix = addPrefix(clone.groupPrefix, name)
+	}
+	return &clone
+}
+
+// attrToKVs appends a slog.Attr to a logr-style kvList.  It handles slog Groups
+// and other details of slog.
+func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any {
+	attrVal := attr.Value.Resolve()
+	if attrVal.Kind() == slog.KindGroup {
+		groupVal := attrVal.Group()
+		grpKVs := make([]any, 0, 2*len(groupVal))
+		prefix := groupPrefix
+		if attr.Key != "" {
+			prefix = addPrefix(groupPrefix, attr.Key)
+		}
+		for _, attr := range groupVal {
+			grpKVs = attrToKVs(attr, prefix, grpKVs)
+		}
+		kvList = append(kvList, grpKVs...)
+	} else if attr.Key != "" {
+		kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any())
+	}
+
+	return kvList
+}
+
+func addPrefix(prefix, name string) string {
+	if prefix == "" {
+		return name
+	}
+	if name == "" {
+		return prefix
+	}
+	return prefix + groupSeparator + name
+}
+
+// levelFromSlog adjusts the level by the logger's verbosity and negates it.
+// It ensures that the result is >= 0. This is necessary because the result is
+// passed to a LogSink and that API did not historically document whether
+// levels could be negative or what that meant.
+//
+// Some example usage:
+//
+//	logrV0 := getMyLogger()
+//	logrV2 := logrV0.V(2)
+//	slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+//	slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
+//	slogV2.Info("msg")  // =~  logrV2.V(0) =~ logrV0.V(2)
+//	slogV2.Warn("msg")  // =~ logrV2.V(-4) =~ logrV0.V(0)
+func (l *slogHandler) levelFromSlog(level slog.Level) int {
+	result := -level
+	result += l.levelBias // in case the original Logger had a V level
+	if result < 0 {
+		result = 0 // because LogSink doesn't expect negative V levels
+	}
+	return int(result)
+}
diff --git a/vendor/github.com/go-logr/logr/slogr.go b/vendor/github.com/go-logr/logr/slogr.go
new file mode 100644
index 0000000000..28a83d0243
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/slogr.go
@@ -0,0 +1,100 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+	"context"
+	"log/slog"
+)
+
+// FromSlogHandler returns a Logger which writes to the slog.Handler.
+//
+// The logr verbosity level is mapped to slog levels such that V(0) becomes
+// slog.LevelInfo and V(4) becomes slog.LevelDebug.
+func FromSlogHandler(handler slog.Handler) Logger {
+	if handler, ok := handler.(*slogHandler); ok {
+		if handler.sink == nil {
+			return Discard()
+		}
+		return New(handler.sink).V(int(handler.levelBias))
+	}
+	return New(&slogSink{handler: handler})
+}
+
+// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger.
+//
+// The returned handler writes all records with level >= slog.LevelError as
+// error log entries with LogSink.Error, regardless of the verbosity level of
+// the Logger:
+//
+//	logger := <some Logger with 0 as verbosity level>
+//	slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
+//
+// The level of all other records gets reduced by the verbosity
+// level of the Logger and the result is negated. If it happens
+// to be negative, then it gets replaced by zero because a LogSink
+// is not expected to handle negative levels:
+//
+//	slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
+//	slog.New(ToSlogHandler(logger)).Warn(...) -> logger.GetSink().Info(level=0, ...)
+//	slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
+//	slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
+func ToSlogHandler(logger Logger) slog.Handler {
+	if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
+		return sink.handler
+	}
+
+	handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
+	if slogSink, ok := handler.sink.(SlogSink); ok {
+		handler.slogSink = slogSink
+	}
+	return handler
+}
+
+// SlogSink is an optional interface that a LogSink can implement to support
+// logging through the slog.Logger or slog.Handler APIs better. It then should
+// also support special slog values like slog.Group. When used as a
+// slog.Handler, the advantages are:
+//
+//   - stack unwinding gets avoided in favor of logging the pre-recorded PC,
+//     as intended by slog
+//   - proper grouping of key/value pairs via WithGroup
+//   - verbosity levels > slog.LevelInfo can be recorded
+//   - less overhead
+//
+// Both APIs (Logger and slog.Logger/Handler) then are supported equally
+// well. Developers can pick whatever API suits them better and/or mix
+// packages which use either API in the same binary with a common logging
+// implementation.
+//
+// This interface is necessary because the type implementing the LogSink
+// interface cannot also implement the slog.Handler interface due to the
+// different prototype of the common Enabled method.
+//
+// An implementation could support both interfaces in two different types, but then
+// additional interfaces would be needed to convert between those types in FromSlogHandler
+// and ToSlogHandler.
+type SlogSink interface {
+	LogSink
+
+	Handle(ctx context.Context, record slog.Record) error
+	WithAttrs(attrs []slog.Attr) SlogSink
+	WithGroup(name string) SlogSink
+}
diff --git a/vendor/github.com/go-logr/logr/slogsink.go b/vendor/github.com/go-logr/logr/slogsink.go
new file mode 100644
index 0000000000..4060fcbc2b
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/slogsink.go
@@ -0,0 +1,120 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+	"context"
+	"log/slog"
+	"runtime"
+	"time"
+)
+
+var (
+	_ LogSink          = &slogSink{}
+	_ CallDepthLogSink = &slogSink{}
+	_ Underlier        = &slogSink{}
+)
+
+// Underlier is implemented by the LogSink returned by FromSlogHandler.
+type Underlier interface {
+	// GetUnderlying returns the Handler used by the LogSink.
+	GetUnderlying() slog.Handler
+}
+
+const (
+	// nameKey is used to log the `WithName` values as an additional attribute.
+	nameKey = "logger"
+
+	// errKey is used to log the error parameter of Error as an additional attribute.
+	errKey = "err"
+)
+
+type slogSink struct {
+	callDepth int
+	name      string
+	handler   slog.Handler
+}
+
+func (l *slogSink) Init(info RuntimeInfo) {
+	l.callDepth = info.CallDepth
+}
+
+func (l *slogSink) GetUnderlying() slog.Handler {
+	return l.handler
+}
+
+func (l *slogSink) WithCallDepth(depth int) LogSink {
+	newLogger := *l
+	newLogger.callDepth += depth
+	return &newLogger
+}
+
+func (l *slogSink) Enabled(level int) bool {
+	return l.handler.Enabled(context.Background(), slog.Level(-level))
+}
+
+func (l *slogSink) Info(level int, msg string, kvList ...interface{}) {
+	l.log(nil, msg, slog.Level(-level), kvList...)
+}
+
+func (l *slogSink) Error(err error, msg string, kvList ...interface{}) {
+	l.log(err, msg, slog.LevelError, kvList...)
+}
+
+func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) {
+	var pcs [1]uintptr
+	// skip runtime.Callers, this function, Info/Error, and all helper functions above that.
+	runtime.Callers(3+l.callDepth, pcs[:])
+
+	record := slog.NewRecord(time.Now(), level, msg, pcs[0])
+	if l.name != "" {
+		record.AddAttrs(slog.String(nameKey, l.name))
+	}
+	if err != nil {
+		record.AddAttrs(slog.Any(errKey, err))
+	}
+	record.Add(kvList...)
+	_ = l.handler.Handle(context.Background(), record)
+}
+
+func (l slogSink) WithName(name string) LogSink {
+	if l.name != "" {
+		l.name += "/"
+	}
+	l.name += name
+	return &l
+}
+
+func (l slogSink) WithValues(kvList ...interface{}) LogSink {
+	l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...))
+	return &l
+}
+
+func kvListToAttrs(kvList ...interface{}) []slog.Attr {
+	// We don't need the record itself, only its Add method.
+	record := slog.NewRecord(time.Time{}, 0, "", 0)
+	record.Add(kvList...)
+	attrs := make([]slog.Attr, 0, record.NumAttrs())
+	record.Attrs(func(attr slog.Attr) bool {
+		attrs = append(attrs, attr)
+		return true
+	})
+	return attrs
+}
diff --git a/vendor/github.com/influxdata/influxdb/models/points.go b/vendor/github.com/influxdata/influxdb/models/points.go
index 72e466eec1..3db5287d6d 100644
--- a/vendor/github.com/influxdata/influxdb/models/points.go
+++ b/vendor/github.com/influxdata/influxdb/models/points.go
@@ -1647,7 +1647,7 @@ func AppendMakeKey(dst []byte, name []byte, tags Tags) []byte {
 	// unescape the name and then re-escape it to avoid double escaping.
 	// The key should always be stored in escaped form.
 	dst = append(dst, EscapeMeasurement(unescapeMeasurement(name))...)
-	dst = tags.AppendHashKey(dst)
+	dst = tags.AppendHashKey(dst, true)
 	return dst
 }
 
@@ -2208,8 +2208,8 @@ func (a Tags) Merge(other map[string]string) Tags {
 }
 
 // HashKey hashes all of a tag's keys.
-func (a Tags) HashKey() []byte {
-	return a.AppendHashKey(nil)
+func (a Tags) HashKey(escapeTags bool) []byte {
+	return a.AppendHashKey(nil, escapeTags)
 }
 
 func (a Tags) needsEscape() bool {
@@ -2226,7 +2226,7 @@ func (a Tags) needsEscape() bool {
 }
 
 // AppendHashKey appends the result of hashing all of a tag's keys and values to dst and returns the extended buffer.
-func (a Tags) AppendHashKey(dst []byte) []byte {
+func (a Tags) AppendHashKey(dst []byte, escapeTags bool) []byte {
 	// Empty maps marshal to empty bytes.
 	if len(a) == 0 {
 		return dst
@@ -2236,7 +2236,7 @@ func (a Tags) AppendHashKey(dst []byte) []byte {
 
 	sz := 0
 	var escaped Tags
-	if a.needsEscape() {
+	if escapeTags && a.needsEscape() {
 		var tmp [20]Tag
 		if len(a) < len(tmp) {
 			escaped = tmp[:len(a)]
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE
deleted file mode 100644
index 8dada3edaf..0000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "{}"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright {yyyy} {name of copyright owner}
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE
deleted file mode 100644
index 5d8cb5b72e..0000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/NOTICE
+++ /dev/null
@@ -1 +0,0 @@
-Copyright 2012 Matt T. Proud (matt.proud@gmail.com)
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore
deleted file mode 100644
index e16fb946bb..0000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-cover.dat
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile
deleted file mode 100644
index 81be214370..0000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-all:
-
-cover:
-	go test -cover -v -coverprofile=cover.dat ./...
-	go tool cover -func cover.dat
-
-.PHONY: cover
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go
deleted file mode 100644
index 7c08e564f1..0000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/decode.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2013 Matt T. Proud
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pbutil
-
-import (
-	"encoding/binary"
-	"errors"
-	"io"
-
-	"google.golang.org/protobuf/proto"
-)
-
-// TODO: Give error package name prefix in next minor release.
-var errInvalidVarint = errors.New("invalid varint32 encountered")
-
-// ReadDelimited decodes a message from the provided length-delimited stream,
-// where the length is encoded as 32-bit varint prefix to the message body.
-// It returns the total number of bytes read and any applicable error.  This is
-// roughly equivalent to the companion Java API's
-// MessageLite#parseDelimitedFrom.  As per the reader contract, this function
-// calls r.Read repeatedly as required until exactly one message including its
-// prefix is read and decoded (or an error has occurred).  The function never
-// reads more bytes from the stream than required.  The function never returns
-// an error if a message has been read and decoded correctly, even if the end
-// of the stream has been reached in doing so.  In that case, any subsequent
-// calls return (0, io.EOF).
-func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
-	// TODO: Consider allowing the caller to specify a decode buffer in the
-	// next major version.
-
-	// TODO: Consider using error wrapping to annotate error state in pass-
-	// through cases in the next minor version.
-
-	// Per AbstractParser#parsePartialDelimitedFrom with
-	// CodedInputStream#readRawVarint32.
-	var headerBuf [binary.MaxVarintLen32]byte
-	var bytesRead, varIntBytes int
-	var messageLength uint64
-	for varIntBytes == 0 { // i.e. no varint has been decoded yet.
-		if bytesRead >= len(headerBuf) {
-			return bytesRead, errInvalidVarint
-		}
-		// We have to read byte by byte here to avoid reading more bytes
-		// than required. Each read byte is appended to what we have
-		// read before.
-		newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
-		if newBytesRead == 0 {
-			if err != nil {
-				return bytesRead, err
-			}
-			// A Reader should not return (0, nil); but if it does, it should
-			// be treated as no-op according to the Reader contract.
-			continue
-		}
-		bytesRead += newBytesRead
-		// Now present everything read so far to the varint decoder and
-		// see if a varint can be decoded already.
-		messageLength, varIntBytes = binary.Uvarint(headerBuf[:bytesRead])
-	}
-
-	messageBuf := make([]byte, messageLength)
-	newBytesRead, err := io.ReadFull(r, messageBuf)
-	bytesRead += newBytesRead
-	if err != nil {
-		return bytesRead, err
-	}
-
-	return bytesRead, proto.Unmarshal(messageBuf, m)
-}
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go
deleted file mode 100644
index e58dd9d297..0000000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/encode.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2013 Matt T. Proud
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pbutil
-
-import (
-	"encoding/binary"
-	"io"
-
-	"google.golang.org/protobuf/proto"
-)
-
-// WriteDelimited encodes and dumps a message to the provided writer prefixed
-// with a 32-bit varint indicating the length of the encoded message, producing
-// a length-delimited record stream, which can be used to chain together
-// encoded messages of the same type together in a file.  It returns the total
-// number of bytes written and any applicable error.  This is roughly
-// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
-func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
-	// TODO: Consider allowing the caller to specify an encode buffer in the
-	// next major version.
-
-	buffer, err := proto.Marshal(m)
-	if err != nil {
-		return 0, err
-	}
-
-	var buf [binary.MaxVarintLen32]byte
-	encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))
-
-	sync, err := w.Write(buf[:encodedLength])
-	if err != nil {
-		return sync, err
-	}
-
-	n, err = w.Write(buffer)
-	return n + sync, err
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index 1feba62c6c..b5c8bcb395 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -475,6 +475,9 @@ type HistogramOpts struct {
 
 	// now is for testing purposes, by default it's time.Now.
 	now func() time.Time
+
+	// afterFunc is for testing purposes, by default it's time.AfterFunc.
+	afterFunc func(time.Duration, func()) *time.Timer
 }
 
 // HistogramVecOpts bundles the options to create a HistogramVec metric.
@@ -526,7 +529,9 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 	if opts.now == nil {
 		opts.now = time.Now
 	}
-
+	if opts.afterFunc == nil {
+		opts.afterFunc = time.AfterFunc
+	}
 	h := &histogram{
 		desc:                            desc,
 		upperBounds:                     opts.Buckets,
@@ -536,6 +541,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
 		nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
 		lastResetTime:                   opts.now(),
 		now:                             opts.now,
+		afterFunc:                       opts.afterFunc,
 	}
 	if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
 		h.upperBounds = DefBuckets
@@ -716,9 +722,16 @@ type histogram struct {
 	nativeHistogramMinResetDuration time.Duration
 	// lastResetTime is protected by mtx. It is also used as created timestamp.
 	lastResetTime time.Time
+	// resetScheduled is protected by mtx. It is true if a reset is
+	// scheduled for a later time (when nativeHistogramMinResetDuration has
+	// passed).
+	resetScheduled bool
 
 	// now is for testing purposes, by default it's time.Now.
 	now func() time.Time
+
+	// afterFunc is for testing purposes, by default it's time.AfterFunc.
+	afterFunc func(time.Duration, func()) *time.Timer
 }
 
 func (h *histogram) Desc() *Desc {
@@ -874,21 +887,31 @@ func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket
 	if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) {
 		return
 	}
+	// One of the other strategies will happen. To undo what they will do as
+	// soon as enough time has passed to satisfy
+	// h.nativeHistogramMinResetDuration, schedule a reset at the right time
+	// if we haven't done so already.
+	if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled {
+		h.resetScheduled = true
+		h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset)
+	}
+
 	if h.maybeWidenZeroBucket(hotCounts, coldCounts) {
 		return
 	}
 	h.doubleBucketWidth(hotCounts, coldCounts)
 }
 
-// maybeReset resets the whole histogram if at least h.nativeHistogramMinResetDuration
-// has been passed. It returns true if the histogram has been reset. The caller
-// must have locked h.mtx.
+// maybeReset resets the whole histogram if at least
+// h.nativeHistogramMinResetDuration has been passed. It returns true if the
+// histogram has been reset. The caller must have locked h.mtx.
 func (h *histogram) maybeReset(
 	hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int,
 ) bool {
 	// We are using the possibly mocked h.now() rather than
 	// time.Since(h.lastResetTime) to enable testing.
-	if h.nativeHistogramMinResetDuration == 0 ||
+	if h.nativeHistogramMinResetDuration == 0 || // No reset configured.
+		h.resetScheduled || // Do not interfere if a reset is already scheduled.
 		h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
 		return false
 	}
@@ -906,6 +929,29 @@ func (h *histogram) maybeReset(
 	return true
 }
 
+// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be
+// called without having locked h.mtx.
+func (h *histogram) reset() {
+	h.mtx.Lock()
+	defer h.mtx.Unlock()
+
+	n := atomic.LoadUint64(&h.countAndHotIdx)
+	hotIdx := n >> 63
+	coldIdx := (^n) >> 63
+	hot := h.counts[hotIdx]
+	cold := h.counts[coldIdx]
+	// Completely reset coldCounts.
+	h.resetCounts(cold)
+	// Make coldCounts the new hot counts while resetting countAndHotIdx.
+	n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63)
+	count := n & ((1 << 63) - 1)
+	waitForCooldown(count, hot)
+	// Finally, reset the formerly hot counts, too.
+	h.resetCounts(hot)
+	h.lastResetTime = h.now()
+	h.resetScheduled = false
+}
+
 // maybeWidenZeroBucket widens the zero bucket until it includes the existing
 // buckets closest to the zero bucket (which could be two, if an equidistant
 // negative and a positive bucket exists, but usually it's only one bucket to be
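
The histogram.go hunks above let a native histogram schedule a deferred full reset via time.AfterFunc once NativeHistogramMinResetDuration has elapsed, instead of relying only on limitBuckets happening to run late enough. A minimal sketch of the kind of histogram this affects; the metric name and values are illustrative, not taken from the patch:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Native histogram with a capped bucket count. When the cap is hit, the
	// updated code schedules a reset for the moment the minimum reset
	// duration has passed, rather than widening buckets indefinitely.
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:                            "example_request_duration_seconds", // hypothetical
		Help:                            "Example native histogram.",
		NativeHistogramBucketFactor:     1.1,
		NativeHistogramMaxBucketNumber:  100,
		NativeHistogramMinResetDuration: time.Hour,
	})
	prometheus.MustRegister(h)
	h.Observe(0.42)
}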
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
index b3c4eca2bc..c21911f292 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -165,6 +165,8 @@ func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
 
 func validateLabelValues(vals []string, expectedNumberOfValues int) error {
 	if len(vals) != expectedNumberOfValues {
+		// The call below makes vals escape, copy them to avoid that.
+		vals := append([]string(nil), vals...)
 		return fmt.Errorf(
 			"%w: expected %d label values but got %d in %#v",
 			errInconsistentCardinality, expectedNumberOfValues,
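
The labels.go hunk copies vals before formatting the error because the fmt.Errorf call with %#v forces its arguments to escape. A self-contained sketch of the same pattern with illustrative names (not part of client_golang):

package main

import "fmt"

// checkCardinality mirrors the idea: copying the slice inside the error
// branch means only the copy escapes to the heap, so the common,
// error-free path stays allocation-free.
func checkCardinality(vals []string, want int) error {
	if len(vals) != want {
		vals := append([]string(nil), vals...) // only this copy escapes
		return fmt.Errorf("expected %d label values but got %d in %#v", want, len(vals), vals)
	}
	return nil
}

func main() {
	fmt.Println(checkCardinality([]string{"get", "200"}, 3))
}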
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
index c0152cdb61..8c1136ceea 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
@@ -11,8 +11,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//go:build !windows && !js
-// +build !windows,!js
+//go:build !windows && !js && !wasip1
+// +build !windows,!js,!wasip1
 
 package prometheus
 
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go
similarity index 63%
rename from vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go
rename to vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go
index c318385cbe..d8d9a6d7a2 100644
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/v2/pbutil/doc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_wasip1.go
@@ -1,10 +1,9 @@
-// Copyright 2013 Matt T. Proud
-//
+// Copyright 2023 The Prometheus Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
-//     http://www.apache.org/licenses/LICENSE-2.0
+// http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
@@ -12,5 +11,16 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// Package pbutil provides record length-delimited Protocol Buffer streaming.
-package pbutil
+//go:build wasip1
+// +build wasip1
+
+package prometheus
+
+func canCollectProcess() bool {
+	return false
+}
+
+func (*processCollector) processCollect(chan<- Metric) {
+	// noop on this platform
+	return
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/problem.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/problem.go
new file mode 100644
index 0000000000..9ba42826ad
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/problem.go
@@ -0,0 +1,33 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promlint
+
+import dto "github.com/prometheus/client_model/go"
+
+// A Problem is an issue detected by a linter.
+type Problem struct {
+	// The name of the metric indicated by this Problem.
+	Metric string
+
+	// A description of the issue for this Problem.
+	Text string
+}
+
+// newProblem is a helper function to create a Problem.
+func newProblem(mf *dto.MetricFamily, text string) Problem {
+	return Problem{
+		Metric: mf.GetName(),
+		Text:   text,
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go
index c8864b6c3f..dd29cccc30 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/promlint.go
@@ -16,15 +16,11 @@ package promlint
 
 import (
 	"errors"
-	"fmt"
 	"io"
-	"regexp"
 	"sort"
-	"strings"
-
-	"github.com/prometheus/common/expfmt"
 
 	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/expfmt"
 )
 
 // A Linter is a Prometheus metrics linter.  It identifies issues with metric
@@ -37,23 +33,8 @@ type Linter struct {
 	// of them.
 	r   io.Reader
 	mfs []*dto.MetricFamily
-}
 
-// A Problem is an issue detected by a Linter.
-type Problem struct {
-	// The name of the metric indicated by this Problem.
-	Metric string
-
-	// A description of the issue for this Problem.
-	Text string
-}
-
-// newProblem is helper function to create a Problem.
-func newProblem(mf *dto.MetricFamily, text string) Problem {
-	return Problem{
-		Metric: mf.GetName(),
-		Text:   text,
-	}
+	customValidations []Validation
 }
 
 // New creates a new Linter that reads an input stream of Prometheus metrics in
@@ -72,6 +53,14 @@ func NewWithMetricFamilies(mfs []*dto.MetricFamily) *Linter {
 	}
 }
 
+// AddCustomValidations adds custom validations to the linter.
+func (l *Linter) AddCustomValidations(vs ...Validation) {
+	if l.customValidations == nil {
+		l.customValidations = make([]Validation, 0, len(vs))
+	}
+	l.customValidations = append(l.customValidations, vs...)
+}
+
 // Lint performs a linting pass, returning a slice of Problems indicating any
 // issues found in the metrics stream. The slice is sorted by metric name
 // and issue description.
@@ -91,11 +80,11 @@ func (l *Linter) Lint() ([]Problem, error) {
 				return nil, err
 			}
 
-			problems = append(problems, lint(mf)...)
+			problems = append(problems, l.lint(mf)...)
 		}
 	}
 	for _, mf := range l.mfs {
-		problems = append(problems, lint(mf)...)
+		problems = append(problems, l.lint(mf)...)
 	}
 
 	// Ensure deterministic output.
@@ -110,276 +99,25 @@ func (l *Linter) Lint() ([]Problem, error) {
 }
 
 // lint is the entry point for linting a single metric.
-func lint(mf *dto.MetricFamily) []Problem {
-	fns := []func(mf *dto.MetricFamily) []Problem{
-		lintHelp,
-		lintMetricUnits,
-		lintCounter,
-		lintHistogramSummaryReserved,
-		lintMetricTypeInName,
-		lintReservedChars,
-		lintCamelCase,
-		lintUnitAbbreviations,
+func (l *Linter) lint(mf *dto.MetricFamily) []Problem {
+	var problems []Problem
+
+	for _, fn := range defaultValidations {
+		errs := fn(mf)
+		for _, err := range errs {
+			problems = append(problems, newProblem(mf, err.Error()))
+		}
 	}
 
-	var problems []Problem
-	for _, fn := range fns {
-		problems = append(problems, fn(mf)...)
+	if l.customValidations != nil {
+		for _, fn := range l.customValidations {
+			errs := fn(mf)
+			for _, err := range errs {
+				problems = append(problems, newProblem(mf, err.Error()))
+			}
+		}
 	}
 
 	// TODO(mdlayher): lint rules for specific metrics types.
 	return problems
 }
-
-// lintHelp detects issues related to the help text for a metric.
-func lintHelp(mf *dto.MetricFamily) []Problem {
-	var problems []Problem
-
-	// Expect all metrics to have help text available.
-	if mf.Help == nil {
-		problems = append(problems, newProblem(mf, "no help text"))
-	}
-
-	return problems
-}
-
-// lintMetricUnits detects issues with metric unit names.
-func lintMetricUnits(mf *dto.MetricFamily) []Problem {
-	var problems []Problem
-
-	unit, base, ok := metricUnits(*mf.Name)
-	if !ok {
-		// No known units detected.
-		return nil
-	}
-
-	// Unit is already a base unit.
-	if unit == base {
-		return nil
-	}
-
-	problems = append(problems, newProblem(mf, fmt.Sprintf("use base unit %q instead of %q", base, unit)))
-
-	return problems
-}
-
-// lintCounter detects issues specific to counters, as well as patterns that should
-// only be used with counters.
-func lintCounter(mf *dto.MetricFamily) []Problem {
-	var problems []Problem
-
-	isCounter := mf.GetType() == dto.MetricType_COUNTER
-	isUntyped := mf.GetType() == dto.MetricType_UNTYPED
-	hasTotalSuffix := strings.HasSuffix(mf.GetName(), "_total")
-
-	switch {
-	case isCounter && !hasTotalSuffix:
-		problems = append(problems, newProblem(mf, `counter metrics should have "_total" suffix`))
-	case !isUntyped && !isCounter && hasTotalSuffix:
-		problems = append(problems, newProblem(mf, `non-counter metrics should not have "_total" suffix`))
-	}
-
-	return problems
-}
-
-// lintHistogramSummaryReserved detects when other types of metrics use names or labels
-// reserved for use by histograms and/or summaries.
-func lintHistogramSummaryReserved(mf *dto.MetricFamily) []Problem {
-	// These rules do not apply to untyped metrics.
-	t := mf.GetType()
-	if t == dto.MetricType_UNTYPED {
-		return nil
-	}
-
-	var problems []Problem
-
-	isHistogram := t == dto.MetricType_HISTOGRAM
-	isSummary := t == dto.MetricType_SUMMARY
-
-	n := mf.GetName()
-
-	if !isHistogram && strings.HasSuffix(n, "_bucket") {
-		problems = append(problems, newProblem(mf, `non-histogram metrics should not have "_bucket" suffix`))
-	}
-	if !isHistogram && !isSummary && strings.HasSuffix(n, "_count") {
-		problems = append(problems, newProblem(mf, `non-histogram and non-summary metrics should not have "_count" suffix`))
-	}
-	if !isHistogram && !isSummary && strings.HasSuffix(n, "_sum") {
-		problems = append(problems, newProblem(mf, `non-histogram and non-summary metrics should not have "_sum" suffix`))
-	}
-
-	for _, m := range mf.GetMetric() {
-		for _, l := range m.GetLabel() {
-			ln := l.GetName()
-
-			if !isHistogram && ln == "le" {
-				problems = append(problems, newProblem(mf, `non-histogram metrics should not have "le" label`))
-			}
-			if !isSummary && ln == "quantile" {
-				problems = append(problems, newProblem(mf, `non-summary metrics should not have "quantile" label`))
-			}
-		}
-	}
-
-	return problems
-}
-
-// lintMetricTypeInName detects when metric types are included in the metric name.
-func lintMetricTypeInName(mf *dto.MetricFamily) []Problem {
-	var problems []Problem
-	n := strings.ToLower(mf.GetName())
-
-	for i, t := range dto.MetricType_name {
-		if i == int32(dto.MetricType_UNTYPED) {
-			continue
-		}
-
-		typename := strings.ToLower(t)
-		if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) {
-			problems = append(problems, newProblem(mf, fmt.Sprintf(`metric name should not include type '%s'`, typename)))
-		}
-	}
-	return problems
-}
-
-// lintReservedChars detects colons in metric names.
-func lintReservedChars(mf *dto.MetricFamily) []Problem {
-	var problems []Problem
-	if strings.Contains(mf.GetName(), ":") {
-		problems = append(problems, newProblem(mf, "metric names should not contain ':'"))
-	}
-	return problems
-}
-
-var camelCase = regexp.MustCompile(`[a-z][A-Z]`)
-
-// lintCamelCase detects metric names and label names written in camelCase.
-func lintCamelCase(mf *dto.MetricFamily) []Problem {
-	var problems []Problem
-	if camelCase.FindString(mf.GetName()) != "" {
-		problems = append(problems, newProblem(mf, "metric names should be written in 'snake_case' not 'camelCase'"))
-	}
-
-	for _, m := range mf.GetMetric() {
-		for _, l := range m.GetLabel() {
-			if camelCase.FindString(l.GetName()) != "" {
-				problems = append(problems, newProblem(mf, "label names should be written in 'snake_case' not 'camelCase'"))
-			}
-		}
-	}
-	return problems
-}
-
-// lintUnitAbbreviations detects abbreviated units in the metric name.
-func lintUnitAbbreviations(mf *dto.MetricFamily) []Problem {
-	var problems []Problem
-	n := strings.ToLower(mf.GetName())
-	for _, s := range unitAbbreviations {
-		if strings.Contains(n, "_"+s+"_") || strings.HasSuffix(n, "_"+s) {
-			problems = append(problems, newProblem(mf, "metric names should not contain abbreviated units"))
-		}
-	}
-	return problems
-}
-
-// metricUnits attempts to detect known unit types used as part of a metric name,
-// e.g. "foo_bytes_total" or "bar_baz_milligrams".
-func metricUnits(m string) (unit, base string, ok bool) {
-	ss := strings.Split(m, "_")
-
-	for _, s := range ss {
-		if base, found := units[s]; found {
-			return s, base, true
-		}
-
-		for _, p := range unitPrefixes {
-			if strings.HasPrefix(s, p) {
-				if base, found := units[s[len(p):]]; found {
-					return s, base, true
-				}
-			}
-		}
-	}
-
-	return "", "", false
-}
-
-// Units and their possible prefixes recognized by this library.  More can be
-// added over time as needed.
-var (
-	// map a unit to the appropriate base unit.
-	units = map[string]string{
-		// Base units.
-		"amperes": "amperes",
-		"bytes":   "bytes",
-		"celsius": "celsius", // Also allow Celsius because it is common in typical Prometheus use cases.
-		"grams":   "grams",
-		"joules":  "joules",
-		"kelvin":  "kelvin", // SI base unit, used in special cases (e.g. color temperature, scientific measurements).
-		"meters":  "meters", // Both American and international spelling permitted.
-		"metres":  "metres",
-		"seconds": "seconds",
-		"volts":   "volts",
-
-		// Non base units.
-		// Time.
-		"minutes": "seconds",
-		"hours":   "seconds",
-		"days":    "seconds",
-		"weeks":   "seconds",
-		// Temperature.
-		"kelvins":    "kelvin",
-		"fahrenheit": "celsius",
-		"rankine":    "celsius",
-		// Length.
-		"inches": "meters",
-		"yards":  "meters",
-		"miles":  "meters",
-		// Bytes.
-		"bits": "bytes",
-		// Energy.
-		"calories": "joules",
-		// Mass.
-		"pounds": "grams",
-		"ounces": "grams",
-	}
-
-	unitPrefixes = []string{
-		"pico",
-		"nano",
-		"micro",
-		"milli",
-		"centi",
-		"deci",
-		"deca",
-		"hecto",
-		"kilo",
-		"kibi",
-		"mega",
-		"mibi",
-		"giga",
-		"gibi",
-		"tera",
-		"tebi",
-		"peta",
-		"pebi",
-	}
-
-	// Common abbreviations that we'd like to discourage.
-	unitAbbreviations = []string{
-		"s",
-		"ms",
-		"us",
-		"ns",
-		"sec",
-		"b",
-		"kb",
-		"mb",
-		"gb",
-		"tb",
-		"pb",
-		"m",
-		"h",
-		"d",
-	}
-)
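
With this change the built-in promlint checks move to the validations subpackage and Linter gains AddCustomValidations. A short usage sketch under the new API; the sample metric text and the naming rule are illustrative only:

package main

import (
	"fmt"
	"strings"

	dto "github.com/prometheus/client_model/go"

	"github.com/prometheus/client_golang/prometheus/testutil/promlint"
)

func main() {
	metrics := strings.NewReader(`# HELP http_requests_total Total HTTP requests.
# TYPE http_requests_total counter
http_requests_total 42
`)

	l := promlint.New(metrics)
	// Custom validation run in addition to the default ones.
	l.AddCustomValidations(func(mf *dto.MetricFamily) []error {
		if !strings.HasPrefix(mf.GetName(), "http_") { // hypothetical rule
			return []error{fmt.Errorf("metric %q should start with http_", mf.GetName())}
		}
		return nil
	})

	problems, err := l.Lint()
	fmt.Println(problems, err)
}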
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go
new file mode 100644
index 0000000000..f52ad9eab6
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validation.go
@@ -0,0 +1,33 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promlint
+
+import (
+	dto "github.com/prometheus/client_model/go"
+
+	"github.com/prometheus/client_golang/prometheus/testutil/promlint/validations"
+)
+
+type Validation = func(mf *dto.MetricFamily) []error
+
+var defaultValidations = []Validation{
+	validations.LintHelp,
+	validations.LintMetricUnits,
+	validations.LintCounter,
+	validations.LintHistogramSummaryReserved,
+	validations.LintMetricTypeInName,
+	validations.LintReservedChars,
+	validations.LintCamelCase,
+	validations.LintUnitAbbreviations,
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/counter_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/counter_validations.go
new file mode 100644
index 0000000000..f2c2c3905d
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/counter_validations.go
@@ -0,0 +1,40 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validations
+
+import (
+	"errors"
+	"strings"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// LintCounter detects issues specific to counters, as well as patterns that should
+// only be used with counters.
+func LintCounter(mf *dto.MetricFamily) []error {
+	var problems []error
+
+	isCounter := mf.GetType() == dto.MetricType_COUNTER
+	isUntyped := mf.GetType() == dto.MetricType_UNTYPED
+	hasTotalSuffix := strings.HasSuffix(mf.GetName(), "_total")
+
+	switch {
+	case isCounter && !hasTotalSuffix:
+		problems = append(problems, errors.New(`counter metrics should have "_total" suffix`))
+	case !isUntyped && !isCounter && hasTotalSuffix:
+		problems = append(problems, errors.New(`non-counter metrics should not have "_total" suffix`))
+	}
+
+	return problems
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go
new file mode 100644
index 0000000000..bc8dbd1e16
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/generic_name_validations.go
@@ -0,0 +1,101 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validations
+
+import (
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+var camelCase = regexp.MustCompile(`[a-z][A-Z]`)
+
+// LintMetricUnits detects issues with metric unit names.
+func LintMetricUnits(mf *dto.MetricFamily) []error {
+	var problems []error
+
+	unit, base, ok := metricUnits(*mf.Name)
+	if !ok {
+		// No known units detected.
+		return nil
+	}
+
+	// Unit is already a base unit.
+	if unit == base {
+		return nil
+	}
+
+	problems = append(problems, fmt.Errorf("use base unit %q instead of %q", base, unit))
+
+	return problems
+}
+
+// LintMetricTypeInName detects when metric types are included in the metric name.
+func LintMetricTypeInName(mf *dto.MetricFamily) []error {
+	var problems []error
+	n := strings.ToLower(mf.GetName())
+
+	for i, t := range dto.MetricType_name {
+		if i == int32(dto.MetricType_UNTYPED) {
+			continue
+		}
+
+		typename := strings.ToLower(t)
+		if strings.Contains(n, "_"+typename+"_") || strings.HasSuffix(n, "_"+typename) {
+			problems = append(problems, fmt.Errorf(`metric name should not include type '%s'`, typename))
+		}
+	}
+	return problems
+}
+
+// LintReservedChars detects colons in metric names.
+func LintReservedChars(mf *dto.MetricFamily) []error {
+	var problems []error
+	if strings.Contains(mf.GetName(), ":") {
+		problems = append(problems, errors.New("metric names should not contain ':'"))
+	}
+	return problems
+}
+
+// LintCamelCase detects metric names and label names written in camelCase.
+func LintCamelCase(mf *dto.MetricFamily) []error {
+	var problems []error
+	if camelCase.FindString(mf.GetName()) != "" {
+		problems = append(problems, errors.New("metric names should be written in 'snake_case' not 'camelCase'"))
+	}
+
+	for _, m := range mf.GetMetric() {
+		for _, l := range m.GetLabel() {
+			if camelCase.FindString(l.GetName()) != "" {
+				problems = append(problems, errors.New("label names should be written in 'snake_case' not 'camelCase'"))
+			}
+		}
+	}
+	return problems
+}
+
+// LintUnitAbbreviations detects abbreviated units in the metric name.
+func LintUnitAbbreviations(mf *dto.MetricFamily) []error {
+	var problems []error
+	n := strings.ToLower(mf.GetName())
+	for _, s := range unitAbbreviations {
+		if strings.Contains(n, "_"+s+"_") || strings.HasSuffix(n, "_"+s) {
+			problems = append(problems, errors.New("metric names should not contain abbreviated units"))
+		}
+	}
+	return problems
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/help_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/help_validations.go
new file mode 100644
index 0000000000..1df2944689
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/help_validations.go
@@ -0,0 +1,32 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validations
+
+import (
+	"errors"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// LintHelp detects issues related to the help text for a metric.
+func LintHelp(mf *dto.MetricFamily) []error {
+	var problems []error
+
+	// Expect all metrics to have help text available.
+	if mf.Help == nil {
+		problems = append(problems, errors.New("no help text"))
+	}
+
+	return problems
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/histogram_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/histogram_validations.go
new file mode 100644
index 0000000000..6564bdf366
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/histogram_validations.go
@@ -0,0 +1,63 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validations
+
+import (
+	"errors"
+	"strings"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// LintHistogramSummaryReserved detects when other types of metrics use names or labels
+// reserved for use by histograms and/or summaries.
+func LintHistogramSummaryReserved(mf *dto.MetricFamily) []error {
+	// These rules do not apply to untyped metrics.
+	t := mf.GetType()
+	if t == dto.MetricType_UNTYPED {
+		return nil
+	}
+
+	var problems []error
+
+	isHistogram := t == dto.MetricType_HISTOGRAM
+	isSummary := t == dto.MetricType_SUMMARY
+
+	n := mf.GetName()
+
+	if !isHistogram && strings.HasSuffix(n, "_bucket") {
+		problems = append(problems, errors.New(`non-histogram metrics should not have "_bucket" suffix`))
+	}
+	if !isHistogram && !isSummary && strings.HasSuffix(n, "_count") {
+		problems = append(problems, errors.New(`non-histogram and non-summary metrics should not have "_count" suffix`))
+	}
+	if !isHistogram && !isSummary && strings.HasSuffix(n, "_sum") {
+		problems = append(problems, errors.New(`non-histogram and non-summary metrics should not have "_sum" suffix`))
+	}
+
+	for _, m := range mf.GetMetric() {
+		for _, l := range m.GetLabel() {
+			ln := l.GetName()
+
+			if !isHistogram && ln == "le" {
+				problems = append(problems, errors.New(`non-histogram metrics should not have "le" label`))
+			}
+			if !isSummary && ln == "quantile" {
+				problems = append(problems, errors.New(`non-summary metrics should not have "quantile" label`))
+			}
+		}
+	}
+
+	return problems
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/units.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/units.go
new file mode 100644
index 0000000000..967977d2b0
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/units.go
@@ -0,0 +1,118 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package validations
+
+import "strings"
+
+// Units and their possible prefixes recognized by this library.  More can be
+// added over time as needed.
+var (
+	// map a unit to the appropriate base unit.
+	units = map[string]string{
+		// Base units.
+		"amperes": "amperes",
+		"bytes":   "bytes",
+		"celsius": "celsius", // Also allow Celsius because it is common in typical Prometheus use cases.
+		"grams":   "grams",
+		"joules":  "joules",
+		"kelvin":  "kelvin", // SI base unit, used in special cases (e.g. color temperature, scientific measurements).
+		"meters":  "meters", // Both American and international spelling permitted.
+		"metres":  "metres",
+		"seconds": "seconds",
+		"volts":   "volts",
+
+		// Non base units.
+		// Time.
+		"minutes": "seconds",
+		"hours":   "seconds",
+		"days":    "seconds",
+		"weeks":   "seconds",
+		// Temperature.
+		"kelvins":    "kelvin",
+		"fahrenheit": "celsius",
+		"rankine":    "celsius",
+		// Length.
+		"inches": "meters",
+		"yards":  "meters",
+		"miles":  "meters",
+		// Bytes.
+		"bits": "bytes",
+		// Energy.
+		"calories": "joules",
+		// Mass.
+		"pounds": "grams",
+		"ounces": "grams",
+	}
+
+	unitPrefixes = []string{
+		"pico",
+		"nano",
+		"micro",
+		"milli",
+		"centi",
+		"deci",
+		"deca",
+		"hecto",
+		"kilo",
+		"kibi",
+		"mega",
+		"mibi",
+		"giga",
+		"gibi",
+		"tera",
+		"tebi",
+		"peta",
+		"pebi",
+	}
+
+	// Common abbreviations that we'd like to discourage.
+	unitAbbreviations = []string{
+		"s",
+		"ms",
+		"us",
+		"ns",
+		"sec",
+		"b",
+		"kb",
+		"mb",
+		"gb",
+		"tb",
+		"pb",
+		"m",
+		"h",
+		"d",
+	}
+)
+
+// metricUnits attempts to detect known unit types used as part of a metric name,
+// e.g. "foo_bytes_total" or "bar_baz_milligrams".
+func metricUnits(m string) (unit, base string, ok bool) {
+	ss := strings.Split(m, "_")
+
+	for _, s := range ss {
+		if base, found := units[s]; found {
+			return s, base, true
+		}
+
+		for _, p := range unitPrefixes {
+			if strings.HasPrefix(s, p) {
+				if base, found := units[s[len(p):]]; found {
+					return s, base, true
+				}
+			}
+		}
+	}
+
+	return "", "", false
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go
index 82d4a5436b..269f56435b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go
@@ -47,6 +47,7 @@ import (
 	"github.com/davecgh/go-spew/spew"
 	dto "github.com/prometheus/client_model/go"
 	"github.com/prometheus/common/expfmt"
+	"google.golang.org/protobuf/proto"
 
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/internal"
@@ -230,6 +231,20 @@ func convertReaderToMetricFamily(reader io.Reader) ([]*dto.MetricFamily, error)
 		return nil, fmt.Errorf("converting reader to metric families failed: %w", err)
 	}
 
+	// The text protocol handles empty help fields inconsistently. When
+	// encoding, any non-nil value, including the empty string, produces a
+	// "# HELP" line. But when decoding, the help field is only set to a
+	// non-nil value if the "# HELP" line contains a non-empty value.
+	//
+	// Because metrics in a registry always have non-nil help fields, populate
+	// any nil help fields in the parsed metrics with the empty string so that
+	// when we compare text encodings, the results are consistent.
+	for _, metric := range notNormalized {
+		if metric.Help == nil {
+			metric.Help = proto.String("")
+		}
+	}
+
 	return internal.NormalizeMetricFamilies(notNormalized), nil
 }
 
diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go
index 4763549b8f..4a926e8d06 100644
--- a/vendor/github.com/prometheus/common/config/http_config.go
+++ b/vendor/github.com/prometheus/common/config/http_config.go
@@ -546,10 +546,10 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT
 
 		// If a authorization_credentials is provided, create a round tripper that will set the
 		// Authorization header correctly on each request.
-		if cfg.Authorization != nil && len(cfg.Authorization.Credentials) > 0 {
-			rt = NewAuthorizationCredentialsRoundTripper(cfg.Authorization.Type, cfg.Authorization.Credentials, rt)
-		} else if cfg.Authorization != nil && len(cfg.Authorization.CredentialsFile) > 0 {
+		if cfg.Authorization != nil && len(cfg.Authorization.CredentialsFile) > 0 {
 			rt = NewAuthorizationCredentialsFileRoundTripper(cfg.Authorization.Type, cfg.Authorization.CredentialsFile, rt)
+		} else if cfg.Authorization != nil {
+			rt = NewAuthorizationCredentialsRoundTripper(cfg.Authorization.Type, cfg.Authorization.Credentials, rt)
 		}
 		// Backwards compatibility, be nice with importers who would not have
 		// called Validate().
@@ -630,7 +630,7 @@ func (rt *authorizationCredentialsFileRoundTripper) RoundTrip(req *http.Request)
 	if len(req.Header.Get("Authorization")) == 0 {
 		b, err := os.ReadFile(rt.authCredentialsFile)
 		if err != nil {
-			return nil, fmt.Errorf("unable to read authorization credentials file %s: %s", rt.authCredentialsFile, err)
+			return nil, fmt.Errorf("unable to read authorization credentials file %s: %w", rt.authCredentialsFile, err)
 		}
 		authCredentials := strings.TrimSpace(string(b))
 
@@ -670,7 +670,7 @@ func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, e
 	if rt.usernameFile != "" {
 		usernameBytes, err := os.ReadFile(rt.usernameFile)
 		if err != nil {
-			return nil, fmt.Errorf("unable to read basic auth username file %s: %s", rt.usernameFile, err)
+			return nil, fmt.Errorf("unable to read basic auth username file %s: %w", rt.usernameFile, err)
 		}
 		username = strings.TrimSpace(string(usernameBytes))
 	} else {
@@ -679,7 +679,7 @@ func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, e
 	if rt.passwordFile != "" {
 		passwordBytes, err := os.ReadFile(rt.passwordFile)
 		if err != nil {
-			return nil, fmt.Errorf("unable to read basic auth password file %s: %s", rt.passwordFile, err)
+			return nil, fmt.Errorf("unable to read basic auth password file %s: %w", rt.passwordFile, err)
 		}
 		password = strings.TrimSpace(string(passwordBytes))
 	} else {
@@ -723,7 +723,7 @@ func (rt *oauth2RoundTripper) RoundTrip(req *http.Request) (*http.Response, erro
 	if rt.config.ClientSecretFile != "" {
 		data, err := os.ReadFile(rt.config.ClientSecretFile)
 		if err != nil {
-			return nil, fmt.Errorf("unable to read oauth2 client secret file %s: %s", rt.config.ClientSecretFile, err)
+			return nil, fmt.Errorf("unable to read oauth2 client secret file %s: %w", rt.config.ClientSecretFile, err)
 		}
 		secret = strings.TrimSpace(string(data))
 		rt.mtx.RLock()
@@ -977,7 +977,7 @@ func (c *TLSConfig) getClientCertificate(_ *tls.CertificateRequestInfo) (*tls.Ce
 	if c.CertFile != "" {
 		certData, err = os.ReadFile(c.CertFile)
 		if err != nil {
-			return nil, fmt.Errorf("unable to read specified client cert (%s): %s", c.CertFile, err)
+			return nil, fmt.Errorf("unable to read specified client cert (%s): %w", c.CertFile, err)
 		}
 	} else {
 		certData = []byte(c.Cert)
@@ -986,7 +986,7 @@ func (c *TLSConfig) getClientCertificate(_ *tls.CertificateRequestInfo) (*tls.Ce
 	if c.KeyFile != "" {
 		keyData, err = os.ReadFile(c.KeyFile)
 		if err != nil {
-			return nil, fmt.Errorf("unable to read specified client key (%s): %s", c.KeyFile, err)
+			return nil, fmt.Errorf("unable to read specified client key (%s): %w", c.KeyFile, err)
 		}
 	} else {
 		keyData = []byte(c.Key)
@@ -994,7 +994,7 @@ func (c *TLSConfig) getClientCertificate(_ *tls.CertificateRequestInfo) (*tls.Ce
 
 	cert, err := tls.X509KeyPair(certData, keyData)
 	if err != nil {
-		return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %s", c.CertFile, c.KeyFile, err)
+		return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %w", c.CertFile, c.KeyFile, err)
 	}
 
 	return &cert, nil
@@ -1004,7 +1004,7 @@ func (c *TLSConfig) getClientCertificate(_ *tls.CertificateRequestInfo) (*tls.Ce
 func readCAFile(f string) ([]byte, error) {
 	data, err := os.ReadFile(f)
 	if err != nil {
-		return nil, fmt.Errorf("unable to load specified CA cert %s: %s", f, err)
+		return nil, fmt.Errorf("unable to load specified CA cert %s: %w", f, err)
 	}
 	return data, nil
 }
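
The first http_config.go hunk swaps the priority so that Authorization.CredentialsFile now takes precedence over inline Credentials when both are present, and the file is re-read on each request. A minimal sketch of building a client from such a config; the file path and header type are examples, not from the patch:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/common/config"
)

func main() {
	cfg := config.HTTPClientConfig{
		Authorization: &config.Authorization{
			Type:            "Bearer",
			CredentialsFile: "/etc/secrets/token", // read for every request
		},
	}
	rt, err := config.NewRoundTripperFromConfig(cfg, "example")
	if err != nil {
		log.Fatal(err)
	}
	client := &http.Client{Transport: rt}
	_ = client
}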
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
index 0ca86a3dc7..a909b171c8 100644
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -14,6 +14,7 @@
 package expfmt
 
 import (
+	"bufio"
 	"fmt"
 	"io"
 	"math"
@@ -21,8 +22,8 @@ import (
 	"net/http"
 
 	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/encoding/protodelim"
 
-	"github.com/matttproud/golang_protobuf_extensions/v2/pbutil"
 	"github.com/prometheus/common/model"
 )
 
@@ -86,8 +87,10 @@ type protoDecoder struct {
 
 // Decode implements the Decoder interface.
 func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
-	_, err := pbutil.ReadDelimited(d.r, v)
-	if err != nil {
+	opts := protodelim.UnmarshalOptions{
+		MaxSize: -1,
+	}
+	if err := opts.UnmarshalFrom(bufio.NewReader(d.r), v); err != nil {
 		return err
 	}
 	if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
index ca21406000..02b7a5e812 100644
--- a/vendor/github.com/prometheus/common/expfmt/encode.go
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -18,10 +18,11 @@ import (
 	"io"
 	"net/http"
 
-	"github.com/matttproud/golang_protobuf_extensions/v2/pbutil"
-	"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
+	"google.golang.org/protobuf/encoding/protodelim"
 	"google.golang.org/protobuf/encoding/prototext"
 
+	"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
+
 	dto "github.com/prometheus/client_model/go"
 )
 
@@ -120,7 +121,7 @@ func NewEncoder(w io.Writer, format Format) Encoder {
 	case FmtProtoDelim:
 		return encoderCloser{
 			encode: func(v *dto.MetricFamily) error {
-				_, err := pbutil.WriteDelimited(w, v)
+				_, err := protodelim.MarshalTo(w, v)
 				return err
 			},
 			close: func() error { return nil },
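
Both the delimited-protobuf decoder and encoder in expfmt now go through google.golang.org/protobuf/encoding/protodelim instead of the archived matttproud/golang_protobuf_extensions pbutil helpers. The public API is unchanged; a small round-trip sketch with an illustrative metric family:

package main

import (
	"bytes"
	"fmt"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"google.golang.org/protobuf/proto"
)

func main() {
	mf := &dto.MetricFamily{
		Name: proto.String("example_total"), // illustrative
		Type: dto.MetricType_COUNTER.Enum(),
		Help: proto.String("An example counter."),
	}

	var buf bytes.Buffer
	enc := expfmt.NewEncoder(&buf, expfmt.FmtProtoDelim)
	if err := enc.Encode(mf); err != nil {
		panic(err)
	}

	dec := expfmt.NewDecoder(&buf, expfmt.FmtProtoDelim)
	var out dto.MetricFamily
	if err := dec.Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetName())
}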
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
index 35db1cc9d7..26490211af 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -16,6 +16,7 @@ package expfmt
 import (
 	"bufio"
 	"bytes"
+	"errors"
 	"fmt"
 	"io"
 	"math"
@@ -24,8 +25,9 @@ import (
 
 	dto "github.com/prometheus/client_model/go"
 
-	"github.com/prometheus/common/model"
 	"google.golang.org/protobuf/proto"
+
+	"github.com/prometheus/common/model"
 )
 
 // A stateFn is a function that represents a state in a state machine. By
@@ -112,7 +114,7 @@ func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricF
 	// stream. Turn this error into something nicer and more
 	// meaningful. (io.EOF is often used as a signal for the legitimate end
 	// of an input stream.)
-	if p.err == io.EOF {
+	if p.err != nil && errors.Is(p.err, io.EOF) {
 		p.parseError("unexpected end of input stream")
 	}
 	return p.metricFamiliesByName, p.err
@@ -146,7 +148,7 @@ func (p *TextParser) startOfLine() stateFn {
 		// which is not an error but the signal that we are done.
 		// Any other error that happens to align with the start of
 		// a line is still an error.
-		if p.err == io.EOF {
+		if errors.Is(p.err, io.EOF) {
 			p.err = nil
 		}
 		return nil
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
index 35e739c7ad..178fdbaf61 100644
--- a/vendor/github.com/prometheus/common/model/alert.go
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -90,13 +90,13 @@ func (a *Alert) Validate() error {
 		return fmt.Errorf("start time must be before end time")
 	}
 	if err := a.Labels.Validate(); err != nil {
-		return fmt.Errorf("invalid label set: %s", err)
+		return fmt.Errorf("invalid label set: %w", err)
 	}
 	if len(a.Labels) == 0 {
 		return fmt.Errorf("at least one label pair required")
 	}
 	if err := a.Annotations.Validate(); err != nil {
-		return fmt.Errorf("invalid annotations: %s", err)
+		return fmt.Errorf("invalid annotations: %w", err)
 	}
 	return nil
 }
diff --git a/vendor/github.com/prometheus/common/model/metadata.go b/vendor/github.com/prometheus/common/model/metadata.go
new file mode 100644
index 0000000000..447ab8ad63
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/metadata.go
@@ -0,0 +1,28 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// MetricType represents metric type values.
+type MetricType string
+
+const (
+	MetricTypeCounter        = MetricType("counter")
+	MetricTypeGauge          = MetricType("gauge")
+	MetricTypeHistogram      = MetricType("histogram")
+	MetricTypeGaugeHistogram = MetricType("gaugehistogram")
+	MetricTypeSummary        = MetricType("summary")
+	MetricTypeInfo           = MetricType("info")
+	MetricTypeStateset       = MetricType("stateset")
+	MetricTypeUnknown        = MetricType("unknown")
+)
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
index 00804b7fed..f8c5eabaa9 100644
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -20,12 +20,10 @@ import (
 	"strings"
 )
 
-var (
-	// MetricNameRE is a regular expression matching valid metric
-	// names. Note that the IsValidMetricName function performs the same
-	// check but faster than a match with this regular expression.
-	MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
-)
+// MetricNameRE is a regular expression matching valid metric
+// names. Note that the IsValidMetricName function performs the same
+// check but faster than a match with this regular expression.
+var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
 
 // A Metric is similar to a LabelSet, but the key difference is that a Metric is
 // a singleton and refers to one and only one stream of samples.
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
index 8762b13c63..dc8a0026c4 100644
--- a/vendor/github.com/prometheus/common/model/signature.go
+++ b/vendor/github.com/prometheus/common/model/signature.go
@@ -22,10 +22,8 @@ import (
 // when calculating their combined hash value (aka signature aka fingerprint).
 const SeparatorByte byte = 255
 
-var (
-	// cache the signature of an empty label set.
-	emptyLabelSignature = hashNew()
-)
+// cache the signature of an empty label set.
+var emptyLabelSignature = hashNew()
 
 // LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
 // given label set. (Collisions are possible but unlikely if the number of label
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
index bb99889d2c..910b0b71fc 100644
--- a/vendor/github.com/prometheus/common/model/silence.go
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -81,7 +81,7 @@ func (s *Silence) Validate() error {
 	}
 	for _, m := range s.Matchers {
 		if err := m.Validate(); err != nil {
-			return fmt.Errorf("invalid matcher: %s", err)
+			return fmt.Errorf("invalid matcher: %w", err)
 		}
 	}
 	if s.StartsAt.IsZero() {
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
index 9eb440413f..8050637d82 100644
--- a/vendor/github.com/prometheus/common/model/value.go
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -21,14 +21,12 @@ import (
 	"strings"
 )
 
-var (
-	// ZeroSample is the pseudo zero-value of Sample used to signal a
-	// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
-	// and metric nil. Note that the natural zero value of Sample has a timestamp
-	// of 0, which is possible to appear in a real Sample and thus not suitable
-	// to signal a non-existing Sample.
-	ZeroSample = Sample{Timestamp: Earliest}
-)
+// ZeroSample is the pseudo zero-value of Sample used to signal a
+// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+// and metric nil. Note that the natural zero value of Sample has a timestamp
+// of 0, which is possible to appear in a real Sample and thus not suitable
+// to signal a non-existing Sample.
+var ZeroSample = Sample{Timestamp: Earliest}
 
 // Sample is a sample pair associated with a metric. A single sample must either
 // define Value or Histogram but not both. Histogram == nil implies the Value
@@ -274,7 +272,7 @@ func (s *Scalar) UnmarshalJSON(b []byte) error {
 
 	value, err := strconv.ParseFloat(f, 64)
 	if err != nil {
-		return fmt.Errorf("error parsing sample value: %s", err)
+		return fmt.Errorf("error parsing sample value: %w", err)
 	}
 	s.Value = SampleValue(value)
 	return nil
diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go
index 0f615a7053..ae35cc2ab4 100644
--- a/vendor/github.com/prometheus/common/model/value_float.go
+++ b/vendor/github.com/prometheus/common/model/value_float.go
@@ -20,14 +20,12 @@ import (
 	"strconv"
 )
 
-var (
-	// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
-	// non-existing sample pair. It is a SamplePair with timestamp Earliest and
-	// value 0.0. Note that the natural zero value of SamplePair has a timestamp
-	// of 0, which is possible to appear in a real SamplePair and thus not
-	// suitable to signal a non-existing SamplePair.
-	ZeroSamplePair = SamplePair{Timestamp: Earliest}
-)
+// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+// non-existing sample pair. It is a SamplePair with timestamp Earliest and
+// value 0.0. Note that the natural zero value of SamplePair has a timestamp
+// of 0, which is possible to appear in a real SamplePair and thus not
+// suitable to signal a non-existing SamplePair.
+var ZeroSamplePair = SamplePair{Timestamp: Earliest}
 
 // A SampleValue is a representation of a value for a given sample at a given
 // time.
diff --git a/vendor/github.com/prometheus/common/version/info.go b/vendor/github.com/prometheus/common/version/info.go
index 00caa0ba47..28884dbc39 100644
--- a/vendor/github.com/prometheus/common/version/info.go
+++ b/vendor/github.com/prometheus/common/version/info.go
@@ -48,12 +48,12 @@ func NewCollector(program string) prometheus.Collector {
 			),
 			ConstLabels: prometheus.Labels{
 				"version":   Version,
-				"revision":  getRevision(),
+				"revision":  GetRevision(),
 				"branch":    Branch,
 				"goversion": GoVersion,
 				"goos":      GoOS,
 				"goarch":    GoArch,
-				"tags":      getTags(),
+				"tags":      GetTags(),
 			},
 		},
 		func() float64 { return 1 },
@@ -75,13 +75,13 @@ func Print(program string) string {
 	m := map[string]string{
 		"program":   program,
 		"version":   Version,
-		"revision":  getRevision(),
+		"revision":  GetRevision(),
 		"branch":    Branch,
 		"buildUser": BuildUser,
 		"buildDate": BuildDate,
 		"goVersion": GoVersion,
 		"platform":  GoOS + "/" + GoArch,
-		"tags":      getTags(),
+		"tags":      GetTags(),
 	}
 	t := template.Must(template.New("version").Parse(versionInfoTmpl))
 
@@ -94,10 +94,10 @@ func Print(program string) string {
 
 // Info returns version, branch and revision information.
 func Info() string {
-	return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, getRevision())
+	return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, GetRevision())
 }
 
 // BuildContext returns goVersion, platform, buildUser and buildDate information.
 func BuildContext() string {
-	return fmt.Sprintf("(go=%s, platform=%s, user=%s, date=%s, tags=%s)", GoVersion, GoOS+"/"+GoArch, BuildUser, BuildDate, getTags())
+	return fmt.Sprintf("(go=%s, platform=%s, user=%s, date=%s, tags=%s)", GoVersion, GoOS+"/"+GoArch, BuildUser, BuildDate, GetTags())
 }
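
getRevision and getTags are now exported, so importers can read the same VCS information that was previously only embedded in the collector and the version template. A tiny sketch:

package main

import (
	"fmt"

	"github.com/prometheus/common/version"
)

func main() {
	// On Go 1.18+ builds GetRevision falls back to runtime/debug build info
	// when version.Revision was not set at link time.
	fmt.Println("revision:", version.GetRevision())
	fmt.Println("tags:", version.GetTags())
}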
diff --git a/vendor/github.com/prometheus/common/version/info_default.go b/vendor/github.com/prometheus/common/version/info_default.go
index 8eb3a0bf17..684996f10a 100644
--- a/vendor/github.com/prometheus/common/version/info_default.go
+++ b/vendor/github.com/prometheus/common/version/info_default.go
@@ -16,7 +16,7 @@
 
 package version
 
-func getRevision() string {
+func GetRevision() string {
 	return Revision
 }
 
diff --git a/vendor/github.com/prometheus/common/version/info_go118.go b/vendor/github.com/prometheus/common/version/info_go118.go
index bfc7d41038..992623c6cc 100644
--- a/vendor/github.com/prometheus/common/version/info_go118.go
+++ b/vendor/github.com/prometheus/common/version/info_go118.go
@@ -18,17 +18,19 @@ package version
 
 import "runtime/debug"
 
-var computedRevision string
-var computedTags string
+var (
+	computedRevision string
+	computedTags     string
+)
 
-func getRevision() string {
+func GetRevision() string {
 	if Revision != "" {
 		return Revision
 	}
 	return computedRevision
 }
 
-func getTags() string {
+func GetTags() string {
 	return computedTags
 }
 
diff --git a/vendor/github.com/urfave/cli/v2/.golangci.yaml b/vendor/github.com/urfave/cli/v2/.golangci.yaml
new file mode 100644
index 0000000000..89b6e86615
--- /dev/null
+++ b/vendor/github.com/urfave/cli/v2/.golangci.yaml
@@ -0,0 +1,4 @@
+# https://golangci-lint.run/usage/configuration/
+linters:
+  enable:
+    - misspell
diff --git a/vendor/github.com/urfave/cli/v2/app.go b/vendor/github.com/urfave/cli/v2/app.go
index f69a1939b1..6e6f6d0d63 100644
--- a/vendor/github.com/urfave/cli/v2/app.go
+++ b/vendor/github.com/urfave/cli/v2/app.go
@@ -23,8 +23,8 @@ var (
 		fmt.Sprintf("See %s", appActionDeprecationURL), 2)
 	ignoreFlagPrefix = "test." // this is to ignore test flags when adding flags from other packages
 
-	SuggestFlag               SuggestFlagFunc    = suggestFlag
-	SuggestCommand            SuggestCommandFunc = suggestCommand
+	SuggestFlag               SuggestFlagFunc    = nil // initialized in suggestions.go unless built with urfave_cli_no_suggest
+	SuggestCommand            SuggestCommandFunc = nil // initialized in suggestions.go unless built with urfave_cli_no_suggest
 	SuggestDidYouMeanTemplate string             = suggestDidYouMeanTemplate
 )
 
@@ -39,6 +39,8 @@ type App struct {
 	Usage string
 	// Text to override the USAGE section of help
 	UsageText string
+	// Whether this command supports arguments
+	Args bool
 	// Description of the program argument format.
 	ArgsUsage string
 	// Version of the program
@@ -366,6 +368,9 @@ func (a *App) suggestFlagFromError(err error, command string) (string, error) {
 		hideHelp = hideHelp || cmd.HideHelp
 	}
 
+	if SuggestFlag == nil {
+		return "", err
+	}
 	suggestion := SuggestFlag(flags, flag, hideHelp)
 	if len(suggestion) == 0 {
 		return "", err
diff --git a/vendor/github.com/urfave/cli/v2/command.go b/vendor/github.com/urfave/cli/v2/command.go
index c318da989b..c20a571ee7 100644
--- a/vendor/github.com/urfave/cli/v2/command.go
+++ b/vendor/github.com/urfave/cli/v2/command.go
@@ -20,6 +20,8 @@ type Command struct {
 	UsageText string
 	// A longer explanation of how the command works
 	Description string
+	// Whether this command supports arguments
+	Args bool
 	// A short description of the arguments of this command
 	ArgsUsage string
 	// The category the command is part of
diff --git a/vendor/github.com/urfave/cli/v2/context.go b/vendor/github.com/urfave/cli/v2/context.go
index a45c120b55..8dd4765211 100644
--- a/vendor/github.com/urfave/cli/v2/context.go
+++ b/vendor/github.com/urfave/cli/v2/context.go
@@ -144,7 +144,7 @@ func (cCtx *Context) Lineage() []*Context {
 	return lineage
 }
 
-// Count returns the num of occurences of this flag
+// Count returns the num of occurrences of this flag
 func (cCtx *Context) Count(name string) int {
 	if fs := cCtx.lookupFlagSet(name); fs != nil {
 		if cf, ok := fs.Lookup(name).Value.(Countable); ok {
diff --git a/vendor/github.com/urfave/cli/v2/flag_uint64_slice.go b/vendor/github.com/urfave/cli/v2/flag_uint64_slice.go
index e845dd5257..d342018686 100644
--- a/vendor/github.com/urfave/cli/v2/flag_uint64_slice.go
+++ b/vendor/github.com/urfave/cli/v2/flag_uint64_slice.go
@@ -190,6 +190,15 @@ func (f *Uint64SliceFlag) Get(ctx *Context) []uint64 {
 	return ctx.Uint64Slice(f.Name)
 }
 
+// RunAction executes flag action if set
+func (f *Uint64SliceFlag) RunAction(c *Context) error {
+	if f.Action != nil {
+		return f.Action(c, c.Uint64Slice(f.Name))
+	}
+
+	return nil
+}
+
 // Uint64Slice looks up the value of a local Uint64SliceFlag, returns
 // nil if not found
 func (cCtx *Context) Uint64Slice(name string) []uint64 {
diff --git a/vendor/github.com/urfave/cli/v2/flag_uint_slice.go b/vendor/github.com/urfave/cli/v2/flag_uint_slice.go
index d2aed480d9..4dc13e126a 100644
--- a/vendor/github.com/urfave/cli/v2/flag_uint_slice.go
+++ b/vendor/github.com/urfave/cli/v2/flag_uint_slice.go
@@ -201,6 +201,15 @@ func (f *UintSliceFlag) Get(ctx *Context) []uint {
 	return ctx.UintSlice(f.Name)
 }
 
+// RunAction executes flag action if set
+func (f *UintSliceFlag) RunAction(c *Context) error {
+	if f.Action != nil {
+		return f.Action(c, c.UintSlice(f.Name))
+	}
+
+	return nil
+}
+
 // UintSlice looks up the value of a local UintSliceFlag, returns
 // nil if not found
 func (cCtx *Context) UintSlice(name string) []uint {
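
These two hunks add the RunAction method that other flag types already had, which suggests an Action attached to a UintSliceFlag or Uint64SliceFlag was previously not invoked. A sketch of a flag that now triggers its action; the flag name and handler are illustrative:

package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "example",
		Flags: []cli.Flag{
			&cli.Uint64SliceFlag{
				Name: "shard",
				// Runs when --shard is set, now that RunAction exists.
				Action: func(c *cli.Context, vs []uint64) error {
					fmt.Println("shards:", vs)
					return nil
				},
			},
		},
		Action: func(c *cli.Context) error { return nil },
	}
	_ = app.Run(os.Args)
}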
diff --git a/vendor/github.com/urfave/cli/v2/godoc-current.txt b/vendor/github.com/urfave/cli/v2/godoc-current.txt
index 6016bd82e7..4b620feeb0 100644
--- a/vendor/github.com/urfave/cli/v2/godoc-current.txt
+++ b/vendor/github.com/urfave/cli/v2/godoc-current.txt
@@ -27,15 +27,15 @@ application:
 VARIABLES
 
 var (
-	SuggestFlag               SuggestFlagFunc    = suggestFlag
-	SuggestCommand            SuggestCommandFunc = suggestCommand
+	SuggestFlag               SuggestFlagFunc    = nil // initialized in suggestions.go unless built with urfave_cli_no_suggest
+	SuggestCommand            SuggestCommandFunc = nil // initialized in suggestions.go unless built with urfave_cli_no_suggest
 	SuggestDidYouMeanTemplate string             = suggestDidYouMeanTemplate
 )
 var AppHelpTemplate = `NAME:
    {{template "helpNameTemplate" .}}
 
 USAGE:
-   {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}
+   {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}
 
 VERSION:
    {{.Version}}{{end}}{{end}}{{if .Description}}
@@ -136,7 +136,7 @@ var SubcommandHelpTemplate = `NAME:
    {{template "helpNameTemplate" .}}
 
 USAGE:
-   {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Description}}
+   {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Description}}
 
 DESCRIPTION:
    {{template "descriptionTemplate" .}}{{end}}{{if .VisibleCommands}}
@@ -253,6 +253,8 @@ type App struct {
 	Usage string
 	// Text to override the USAGE section of help
 	UsageText string
+	// Whether this command supports arguments
+	Args bool
 	// Description of the program argument format.
 	ArgsUsage string
 	// Version of the program
@@ -523,6 +525,8 @@ type Command struct {
 	UsageText string
 	// A longer explanation of how the command works
 	Description string
+	// Whether this command supports arguments
+	Args bool
 	// A short description of the arguments of this command
 	ArgsUsage string
 	// The category the command is part of
@@ -649,7 +653,7 @@ func (cCtx *Context) Bool(name string) bool
     Bool looks up the value of a local BoolFlag, returns false if not found
 
 func (cCtx *Context) Count(name string) int
-    Count returns the num of occurences of this flag
+    Count returns the num of occurrences of this flag
 
 func (cCtx *Context) Duration(name string) time.Duration
     Duration looks up the value of a local DurationFlag, returns 0 if not found
@@ -2142,6 +2146,9 @@ func (f *Uint64SliceFlag) IsVisible() bool
 func (f *Uint64SliceFlag) Names() []string
     Names returns the names of the flag
 
+func (f *Uint64SliceFlag) RunAction(c *Context) error
+    RunAction executes flag action if set
+
 func (f *Uint64SliceFlag) String() string
     String returns a readable representation of this value (for usage defaults)
 
@@ -2307,6 +2314,9 @@ func (f *UintSliceFlag) IsVisible() bool
 func (f *UintSliceFlag) Names() []string
     Names returns the names of the flag
 
+func (f *UintSliceFlag) RunAction(c *Context) error
+    RunAction executes flag action if set
+
 func (f *UintSliceFlag) String() string
     String returns a readable representation of this value (for usage defaults)
 
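A short sketch of the new Args field documented above, under the assumption that the updated help templates only print the "[arguments...]" placeholder when Args is true (or ArgsUsage is set):

package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name:  "greet",
		Usage: "say hello",
		// Args marks the command as accepting positional arguments, so the
		// USAGE line keeps the "[arguments...]" placeholder.
		Args: true,
		Action: func(ctx *cli.Context) error {
			fmt.Println("hello", ctx.Args().First())
			return nil
		},
	}
	_ = app.Run(os.Args)
}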
diff --git a/vendor/github.com/urfave/cli/v2/help.go b/vendor/github.com/urfave/cli/v2/help.go
index 84bd77bc17..640e290452 100644
--- a/vendor/github.com/urfave/cli/v2/help.go
+++ b/vendor/github.com/urfave/cli/v2/help.go
@@ -278,7 +278,7 @@ func ShowCommandHelp(ctx *Context, command string) error {
 
 	if ctx.App.CommandNotFound == nil {
 		errMsg := fmt.Sprintf("No help topic for '%v'", command)
-		if ctx.App.Suggest {
+		if ctx.App.Suggest && SuggestCommand != nil {
 			if suggestion := SuggestCommand(ctx.Command.Subcommands, command); suggestion != "" {
 				errMsg += ". " + suggestion
 			}
diff --git a/vendor/github.com/urfave/cli/v2/suggestions.go b/vendor/github.com/urfave/cli/v2/suggestions.go
index c73a0012da..9d2b7a81e7 100644
--- a/vendor/github.com/urfave/cli/v2/suggestions.go
+++ b/vendor/github.com/urfave/cli/v2/suggestions.go
@@ -1,3 +1,6 @@
+//go:build !urfave_cli_no_suggest
+// +build !urfave_cli_no_suggest
+
 package cli
 
 import (
@@ -6,6 +9,11 @@ import (
 	"github.com/xrash/smetrics"
 )
 
+func init() {
+	SuggestFlag = suggestFlag
+	SuggestCommand = suggestCommand
+}
+
 func jaroWinkler(a, b string) float64 {
 	// magic values are from https://github.com/xrash/smetrics/blob/039620a656736e6ad994090895784a7af15e0b80/jaro-winkler.go#L8
 	const (
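With this change, a binary built with -tags urfave_cli_no_suggest skips suggestions.go entirely, so cli.SuggestFlag and cli.SuggestCommand stay nil and the smetrics dependency is dropped. A small sketch of the nil check that callers outside the library would then need, mirroring the guard added to help.go:

package main

import (
	"fmt"

	"github.com/urfave/cli/v2"
)

// printSuggestion produces a "did you mean" hint only when the suggestion
// hooks are compiled in (i.e. the urfave_cli_no_suggest tag is not set).
func printSuggestion(commands []*cli.Command, typed string) {
	if cli.SuggestCommand == nil {
		return
	}
	if s := cli.SuggestCommand(commands, typed); s != "" {
		fmt.Println(s)
	}
}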
diff --git a/vendor/github.com/urfave/cli/v2/template.go b/vendor/github.com/urfave/cli/v2/template.go
index da98890ebb..daad470022 100644
--- a/vendor/github.com/urfave/cli/v2/template.go
+++ b/vendor/github.com/urfave/cli/v2/template.go
@@ -1,7 +1,7 @@
 package cli
 
 var helpNameTemplate = `{{$v := offset .HelpName 6}}{{wrap .HelpName 3}}{{if .Usage}} - {{wrap .Usage $v}}{{end}}`
-var usageTemplate = `{{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}`
+var usageTemplate = `{{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}[arguments...]{{end}}{{end}}`
 var descriptionTemplate = `{{wrap .Description 3}}`
 var authorsTemplate = `{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}:
    {{range $index, $author := .Authors}}{{if $index}}
@@ -35,7 +35,7 @@ var AppHelpTemplate = `NAME:
    {{template "helpNameTemplate" .}}
 
 USAGE:
-   {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}
+   {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}
 
 VERSION:
    {{.Version}}{{end}}{{end}}{{if .Description}}
@@ -83,7 +83,7 @@ var SubcommandHelpTemplate = `NAME:
    {{template "helpNameTemplate" .}}
 
 USAGE:
-   {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Description}}
+   {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Description}}
 
 DESCRIPTION:
    {{template "descriptionTemplate" .}}{{end}}{{if .VisibleCommands}}
diff --git a/vendor/github.com/xrash/smetrics/soundex.go b/vendor/github.com/xrash/smetrics/soundex.go
index a2ad034d5e..18c3aef724 100644
--- a/vendor/github.com/xrash/smetrics/soundex.go
+++ b/vendor/github.com/xrash/smetrics/soundex.go
@@ -6,36 +6,58 @@ import (
 
 // The Soundex encoding. It is a phonetic algorithm that considers how the words sound in English. Soundex maps a string to a 4-byte code consisting of the first letter of the original string and three numbers. Strings that sound similar should map to the same code.
 func Soundex(s string) string {
-	m := map[byte]string{
-		'B': "1", 'P': "1", 'F': "1", 'V': "1",
-		'C': "2", 'S': "2", 'K': "2", 'G': "2", 'J': "2", 'Q': "2", 'X': "2", 'Z': "2",
-		'D': "3", 'T': "3",
-		'L': "4",
-		'M': "5", 'N': "5",
-		'R': "6",
-	}
+	b := strings.Builder{}
+	b.Grow(4)
 
-	s = strings.ToUpper(s)
-
-	r := string(s[0])
 	p := s[0]
-	for i := 1; i < len(s) && len(r) < 4; i++ {
+	if p <= 'z' && p >= 'a' {
+		p -= 32 // convert to uppercase
+	}
+	b.WriteByte(p)
+
+	n := 0
+	for i := 1; i < len(s); i++ {
 		c := s[i]
 
-		if (c < 'A' || c > 'Z') || (c == p) {
+		if c <= 'z' && c >= 'a' {
+			c -= 32 // convert to uppercase
+		} else if c < 'A' || c > 'Z' {
+			continue
+		}
+
+		if c == p {
 			continue
 		}
 
 		p = c
 
-		if n, ok := m[c]; ok {
-			r += n
+		switch c {
+		case 'B', 'P', 'F', 'V':
+			c = '1'
+		case 'C', 'S', 'K', 'G', 'J', 'Q', 'X', 'Z':
+			c = '2'
+		case 'D', 'T':
+			c = '3'
+		case 'L':
+			c = '4'
+		case 'M', 'N':
+			c = '5'
+		case 'R':
+			c = '6'
+		default:
+			continue
+		}
+
+		b.WriteByte(c)
+		n++
+		if n == 3 {
+			break
 		}
 	}
 
-	for i := len(r); i < 4; i++ {
-		r += "0"
+	for i := n; i < 3; i++ {
+		b.WriteByte('0')
 	}
 
-	return r
+	return b.String()
 }
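The Soundex rewrite above replaces the per-call map with a switch and a strings.Builder but is intended to preserve the output format: first letter plus three digits, zero-padded. A quick usage sketch with the classic textbook pairing (expected codes traced against this implementation):

package main

import (
	"fmt"

	"github.com/xrash/smetrics"
)

func main() {
	// Both spellings collapse to the same 4-byte code.
	fmt.Println(smetrics.Soundex("Robert")) // R163
	fmt.Println(smetrics.Soundex("Rupert")) // R163
}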
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go
index 2e0443dd3b..77a84e5175 100644
--- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go
+++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go
@@ -129,6 +129,8 @@ func (v Value) getState() *internal.State {
 	return internal.GetValueState(internal.Value(v))
 }
 
+// FromRaw sets the value from the given raw value.
+// Calling this function on zero-initialized Value will cause a panic.
 func (v Value) FromRaw(iv any) error {
 	switch tv := iv.(type) {
 	case nil:
@@ -198,37 +200,31 @@ func (v Value) Type() ValueType {
 // Str returns the string value associated with this Value.
 // The shorter name is used instead of String to avoid implementing fmt.Stringer interface.
 // If the Type() is not ValueTypeStr then returns empty string.
-// Calling this function on zero-initialized Value will cause a panic.
 func (v Value) Str() string {
 	return v.getOrig().GetStringValue()
 }
 
 // Int returns the int64 value associated with this Value.
 // If the Type() is not ValueTypeInt then returns int64(0).
-// Calling this function on zero-initialized Value will cause a panic.
 func (v Value) Int() int64 {
 	return v.getOrig().GetIntValue()
 }
 
 // Double returns the float64 value associated with this Value.
 // If the Type() is not ValueTypeDouble then returns float64(0).
-// Calling this function on zero-initialized Value will cause a panic.
 func (v Value) Double() float64 {
 	return v.getOrig().GetDoubleValue()
 }
 
 // Bool returns the bool value associated with this Value.
 // If the Type() is not ValueTypeBool then returns false.
-// Calling this function on zero-initialized Value will cause a panic.
 func (v Value) Bool() bool {
 	return v.getOrig().GetBoolValue()
 }
 
 // Map returns the map value associated with this Value.
-// If the Type() is not ValueTypeMap then returns an invalid map. Note that using
-// such map can cause panic.
-//
-// Calling this function on zero-initialized Value will cause a panic.
+// If the function is called on zero-initialized Value or if the Type() is not ValueTypeMap
+// then it returns an invalid map. Note that using such map can cause panic.
 func (v Value) Map() Map {
 	kvlist := v.getOrig().GetKvlistValue()
 	if kvlist == nil {
@@ -238,10 +234,8 @@ func (v Value) Map() Map {
 }
 
 // Slice returns the slice value associated with this Value.
-// If the Type() is not ValueTypeSlice then returns an invalid slice. Note that using
-// such slice can cause panic.
-//
-// Calling this function on zero-initialized Value will cause a panic.
+// If the function is called on zero-initialized Value or if the Type() is not ValueTypeSlice
+// then returns an invalid slice. Note that using such slice can cause panic.
 func (v Value) Slice() Slice {
 	arr := v.getOrig().GetArrayValue()
 	if arr == nil {
@@ -251,10 +245,8 @@ func (v Value) Slice() Slice {
 }
 
 // Bytes returns the ByteSlice value associated with this Value.
-// If the Type() is not ValueTypeBytes then returns an invalid ByteSlice object. Note that using
-// such slice can cause panic.
-//
-// Calling this function on zero-initialized Value will cause a panic.
+// If the function is called on zero-initialized Value or if the Type() is not ValueTypeBytes
+// then returns an invalid ByteSlice object. Note that using such slice can cause panic.
 func (v Value) Bytes() ByteSlice {
 	bv, ok := v.getOrig().GetValue().(*otlpcommon.AnyValue_BytesValue)
 	if !ok {
@@ -325,6 +317,7 @@ func (v Value) SetEmptySlice() Slice {
 }
 
 // CopyTo copies the Value instance overriding the destination.
+// Calling this function on zero-initialized Value will cause a panic.
 func (v Value) CopyTo(dest Value) {
 	dest.getState().AssertMutable()
 	destOrig := dest.getOrig()
@@ -370,6 +363,7 @@ func (v Value) CopyTo(dest Value) {
 // AsString converts an OTLP Value object of any type to its equivalent string
 // representation. This differs from Str which only returns a non-empty value
 // if the ValueType is ValueTypeStr.
+// Calling this function on zero-initialized Value will cause a panic.
 func (v Value) AsString() string {
 	switch v.Type() {
 	case ValueTypeEmpty:
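A small sketch of the contract the updated comments describe: a Value must come from one of the pcommon constructors before FromRaw, CopyTo, or AsString are called, while accessors such as Map() on a zero Value now documentedly return an invalid object instead of being safe to use.

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

func main() {
	// Properly constructed Value: FromRaw and AsString are safe.
	v := pcommon.NewValueEmpty()
	if err := v.FromRaw(map[string]any{"enabled": true, "limit": int64(10)}); err != nil {
		panic(err)
	}
	fmt.Println(v.Type(), v.AsString())

	// By contrast, per the comments above, calling FromRaw, CopyTo or
	// AsString on a zero-initialized Value panics:
	// var zero pcommon.Value
	// zero.AsString() // panic
}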
diff --git a/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go
deleted file mode 100644
index d33c8890fc..0000000000
--- a/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.13
-
-package poly1305
-
-// Generic fallbacks for the math/bits intrinsics, copied from
-// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sum64 had
-// variable time fallbacks until Go 1.13.
-
-func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) {
-	sum = x + y + carry
-	carryOut = ((x & y) | ((x | y) &^ sum)) >> 63
-	return
-}
-
-func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) {
-	diff = x - y - borrow
-	borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63
-	return
-}
-
-func bitsMul64(x, y uint64) (hi, lo uint64) {
-	const mask32 = 1<<32 - 1
-	x0 := x & mask32
-	x1 := x >> 32
-	y0 := y & mask32
-	y1 := y >> 32
-	w0 := x0 * y0
-	t := x1*y0 + w0>>32
-	w1 := t & mask32
-	w2 := t >> 32
-	w1 += x0 * y1
-	hi = x1*y1 + w2 + w1>>32
-	lo = x * y
-	return
-}
diff --git a/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go b/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
deleted file mode 100644
index 495c1fa697..0000000000
--- a/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.13
-
-package poly1305
-
-import "math/bits"
-
-func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) {
-	return bits.Add64(x, y, carry)
-}
-
-func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) {
-	return bits.Sub64(x, y, borrow)
-}
-
-func bitsMul64(x, y uint64) (hi, lo uint64) {
-	return bits.Mul64(x, y)
-}
diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
index e041da5ea3..ec2202bd7d 100644
--- a/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
+++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go
@@ -7,7 +7,10 @@
 
 package poly1305
 
-import "encoding/binary"
+import (
+	"encoding/binary"
+	"math/bits"
+)
 
 // Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag
 // for a 64 bytes message is approximately
@@ -114,13 +117,13 @@ type uint128 struct {
 }
 
 func mul64(a, b uint64) uint128 {
-	hi, lo := bitsMul64(a, b)
+	hi, lo := bits.Mul64(a, b)
 	return uint128{lo, hi}
 }
 
 func add128(a, b uint128) uint128 {
-	lo, c := bitsAdd64(a.lo, b.lo, 0)
-	hi, c := bitsAdd64(a.hi, b.hi, c)
+	lo, c := bits.Add64(a.lo, b.lo, 0)
+	hi, c := bits.Add64(a.hi, b.hi, c)
 	if c != 0 {
 		panic("poly1305: unexpected overflow")
 	}
@@ -155,8 +158,8 @@ func updateGeneric(state *macState, msg []byte) {
 		// hide leading zeroes. For full chunks, that's 1 << 128, so we can just
 		// add 1 to the most significant (2¹²⁸) limb, h2.
 		if len(msg) >= TagSize {
-			h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0)
-			h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c)
+			h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0)
+			h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(msg[8:16]), c)
 			h2 += c + 1
 
 			msg = msg[TagSize:]
@@ -165,8 +168,8 @@ func updateGeneric(state *macState, msg []byte) {
 			copy(buf[:], msg)
 			buf[len(msg)] = 1
 
-			h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0)
-			h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c)
+			h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0)
+			h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(buf[8:16]), c)
 			h2 += c
 
 			msg = nil
@@ -219,9 +222,9 @@ func updateGeneric(state *macState, msg []byte) {
 		m3 := h2r1
 
 		t0 := m0.lo
-		t1, c := bitsAdd64(m1.lo, m0.hi, 0)
-		t2, c := bitsAdd64(m2.lo, m1.hi, c)
-		t3, _ := bitsAdd64(m3.lo, m2.hi, c)
+		t1, c := bits.Add64(m1.lo, m0.hi, 0)
+		t2, c := bits.Add64(m2.lo, m1.hi, c)
+		t3, _ := bits.Add64(m3.lo, m2.hi, c)
 
 		// Now we have the result as 4 64-bit limbs, and we need to reduce it
 		// modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do
@@ -243,14 +246,14 @@ func updateGeneric(state *macState, msg []byte) {
 
 		// To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c.
 
-		h0, c = bitsAdd64(h0, cc.lo, 0)
-		h1, c = bitsAdd64(h1, cc.hi, c)
+		h0, c = bits.Add64(h0, cc.lo, 0)
+		h1, c = bits.Add64(h1, cc.hi, c)
 		h2 += c
 
 		cc = shiftRightBy2(cc)
 
-		h0, c = bitsAdd64(h0, cc.lo, 0)
-		h1, c = bitsAdd64(h1, cc.hi, c)
+		h0, c = bits.Add64(h0, cc.lo, 0)
+		h1, c = bits.Add64(h1, cc.hi, c)
 		h2 += c
 
 		// h2 is at most 3 + 1 + 1 = 5, making the whole of h at most
@@ -287,9 +290,9 @@ func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) {
 	// in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the
 	// result if the subtraction underflows, and t otherwise.
 
-	hMinusP0, b := bitsSub64(h0, p0, 0)
-	hMinusP1, b := bitsSub64(h1, p1, b)
-	_, b = bitsSub64(h2, p2, b)
+	hMinusP0, b := bits.Sub64(h0, p0, 0)
+	hMinusP1, b := bits.Sub64(h1, p1, b)
+	_, b = bits.Sub64(h2, p2, b)
 
 	// h = h if h < p else h - p
 	h0 = select64(b, h0, hMinusP0)
@@ -301,8 +304,8 @@ func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) {
 	//
 	// by just doing a wide addition with the 128 low bits of h and discarding
 	// the overflow.
-	h0, c := bitsAdd64(h0, s[0], 0)
-	h1, _ = bitsAdd64(h1, s[1], c)
+	h0, c := bits.Add64(h0, s[0], 0)
+	h1, _ = bits.Add64(h1, s[1], c)
 
 	binary.LittleEndian.PutUint64(out[0:8], h0)
 	binary.LittleEndian.PutUint64(out[8:16], h1)
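The poly1305 change is mechanical: the pre-Go 1.13 fallbacks are deleted and the code calls math/bits directly, whose Add64/Sub64/Mul64 have had suitable implementations since Go 1.13. A minimal illustration of the same carry-chain pattern used in updateGeneric:

package main

import (
	"fmt"
	"math/bits"
)

// add128 adds two 128-bit values held as (hi, lo) limb pairs,
// propagating the carry exactly as the poly1305 code does.
func add128(aHi, aLo, bHi, bLo uint64) (hi, lo uint64) {
	lo, c := bits.Add64(aLo, bLo, 0)
	hi, _ = bits.Add64(aHi, bHi, c)
	return hi, lo
}

func main() {
	// (2^64 - 1) + 1 = 2^64, i.e. hi=1, lo=0.
	hi, lo := add128(0, ^uint64(0), 0, 1)
	fmt.Println(hi, lo) // 1 0

	// 64x64 -> 128-bit multiply, as in mul64.
	h, l := bits.Mul64(1<<32, 1<<32)
	fmt.Println(h, l) // 1 0
}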
diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go
index 12b12a30c5..02ccd08a77 100644
--- a/vendor/golang.org/x/oauth2/google/default.go
+++ b/vendor/golang.org/x/oauth2/google/default.go
@@ -12,6 +12,7 @@ import (
 	"os"
 	"path/filepath"
 	"runtime"
+	"sync"
 	"time"
 
 	"cloud.google.com/go/compute/metadata"
@@ -41,12 +42,20 @@ type Credentials struct {
 	// running on Google Cloud Platform.
 	JSON []byte
 
+	udMu sync.Mutex // guards universeDomain
 	// universeDomain is the default service domain for a given Cloud universe.
 	universeDomain string
 }
 
 // UniverseDomain returns the default service domain for a given Cloud universe.
+//
 // The default value is "googleapis.com".
+//
+// Deprecated: Use instead (*Credentials).GetUniverseDomain(), which supports
+// obtaining the universe domain when authenticating via the GCE metadata server.
+// Unlike GetUniverseDomain, this method, UniverseDomain, will always return the
+// default value when authenticating via the GCE metadata server.
+// See also [The attached service account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa).
 func (c *Credentials) UniverseDomain() string {
 	if c.universeDomain == "" {
 		return universeDomainDefault
@@ -54,6 +63,55 @@ func (c *Credentials) UniverseDomain() string {
 	return c.universeDomain
 }
 
+// GetUniverseDomain returns the default service domain for a given Cloud
+// universe.
+//
+// The default value is "googleapis.com".
+//
+// It obtains the universe domain from the attached service account on GCE when
+// authenticating via the GCE metadata server. See also [The attached service
+// account](https://cloud.google.com/docs/authentication/application-default-credentials#attached-sa).
+// If the GCE metadata server returns a 404 error, the default value is
+// returned. If the GCE metadata server returns an error other than 404, the
+// error is returned.
+func (c *Credentials) GetUniverseDomain() (string, error) {
+	c.udMu.Lock()
+	defer c.udMu.Unlock()
+	if c.universeDomain == "" && metadata.OnGCE() {
+		// If we're on Google Compute Engine, an App Engine standard second
+		// generation runtime, or App Engine flexible, use the metadata server.
+		err := c.computeUniverseDomain()
+		if err != nil {
+			return "", err
+		}
+	}
+	// If not on Google Compute Engine, or in case of any non-error path in
+	// computeUniverseDomain that did not set universeDomain, set the default
+	// universe domain.
+	if c.universeDomain == "" {
+		c.universeDomain = universeDomainDefault
+	}
+	return c.universeDomain, nil
+}
+
+// computeUniverseDomain fetches the default service domain for a given Cloud
+// universe from Google Compute Engine (GCE)'s metadata server. It's only valid
+// to use this method if your program is running on a GCE instance.
+func (c *Credentials) computeUniverseDomain() error {
+	var err error
+	c.universeDomain, err = metadata.Get("universe/universe_domain")
+	if err != nil {
+		if _, ok := err.(metadata.NotDefinedError); ok {
+			// http.StatusNotFound (404)
+			c.universeDomain = universeDomainDefault
+			return nil
+		} else {
+			return err
+		}
+	}
+	return nil
+}
+
 // DefaultCredentials is the old name of Credentials.
 //
 // Deprecated: use Credentials instead.
@@ -91,6 +149,12 @@ type CredentialsParams struct {
 	// Note: This option is currently only respected when using credentials
 	// fetched from the GCE metadata server.
 	EarlyTokenRefresh time.Duration
+
+	// UniverseDomain is the default service domain for a given Cloud universe.
+	// Only supported in authentication flows that support universe domains.
+	// This value takes precedence over a universe domain explicitly specified
+	// in a credentials config file or by the GCE metadata server. Optional.
+	UniverseDomain string
 }
 
 func (params CredentialsParams) deepCopy() CredentialsParams {
@@ -175,8 +239,9 @@ func FindDefaultCredentialsWithParams(ctx context.Context, params CredentialsPar
 	if metadata.OnGCE() {
 		id, _ := metadata.ProjectID()
 		return &Credentials{
-			ProjectID:   id,
-			TokenSource: computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...),
+			ProjectID:      id,
+			TokenSource:    computeTokenSource("", params.EarlyTokenRefresh, params.Scopes...),
+			universeDomain: params.UniverseDomain,
 		}, nil
 	}
 
@@ -217,6 +282,9 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params
 	}
 
 	universeDomain := f.UniverseDomain
+	if params.UniverseDomain != "" {
+		universeDomain = params.UniverseDomain
+	}
 	// Authorized user credentials are only supported in the googleapis.com universe.
 	if f.Type == userCredentialsKey {
 		universeDomain = universeDomainDefault
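A hedged sketch of the new surface: CredentialsParams.UniverseDomain pins the domain up front, and GetUniverseDomain (unlike the deprecated UniverseDomain method) also consults the GCE metadata server when nothing was configured. The scope string is the usual cloud-platform scope; the commented override value is purely illustrative.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()
	creds, err := google.FindDefaultCredentialsWithParams(ctx, google.CredentialsParams{
		Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
		// Optional override; when empty, the value comes from the credentials
		// file or, on GCE, from the metadata server.
		// UniverseDomain: "example-universe.example",
	})
	if err != nil {
		log.Fatal(err)
	}
	ud, err := creds.GetUniverseDomain()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("universe domain:", ud) // "googleapis.com" by default
}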
diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go
index b18efb743f..948a3ee63d 100644
--- a/vendor/golang.org/x/sync/errgroup/errgroup.go
+++ b/vendor/golang.org/x/sync/errgroup/errgroup.go
@@ -4,6 +4,9 @@
 
 // Package errgroup provides synchronization, error propagation, and Context
 // cancelation for groups of goroutines working on subtasks of a common task.
+//
+// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks
+// returning errors.
 package errgroup
 
 import (
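A minimal example of the relationship the new doc comment spells out: an errgroup.Group behaves like a sync.WaitGroup whose Wait also returns the first error, and WithContext cancels the derived context when any task fails.

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	items := []string{"a", "b", "c"}
	for _, it := range items {
		it := it // capture loop variable (pre-Go 1.22 semantics)
		g.Go(func() error {
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
				fmt.Println("processing", it)
				return nil
			}
		})
	}
	// Wait blocks like WaitGroup.Wait, but also reports the first error.
	if err := g.Wait(); err != nil {
		fmt.Println("failed:", err)
	}
}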
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index 6202638bae..c6492020ec 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -248,6 +248,7 @@ struct ltchars {
 #include <linux/module.h>
 #include <linux/mount.h>
 #include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nf_tables.h>
 #include <linux/netlink.h>
 #include <linux/net_namespace.h>
 #include <linux/nfc.h>
@@ -283,10 +284,6 @@ struct ltchars {
 #include <asm/termbits.h>
 #endif
 
-#ifndef MSG_FASTOPEN
-#define MSG_FASTOPEN    0x20000000
-#endif
-
 #ifndef PTRACE_GETREGS
 #define PTRACE_GETREGS	0xc
 #endif
@@ -295,14 +292,6 @@ struct ltchars {
 #define PTRACE_SETREGS	0xd
 #endif
 
-#ifndef SOL_NETLINK
-#define SOL_NETLINK	270
-#endif
-
-#ifndef SOL_SMC
-#define SOL_SMC 286
-#endif
-
 #ifdef SOL_BLUETOOTH
 // SPARC includes this in /usr/include/sparc64-linux-gnu/bits/socket.h
 // but it is already in bluetooth_linux.go
@@ -319,10 +308,23 @@ struct ltchars {
 #undef TIPC_WAIT_FOREVER
 #define TIPC_WAIT_FOREVER 0xffffffff
 
-// Copied from linux/l2tp.h
-// Including linux/l2tp.h here causes conflicts between linux/in.h
-// and netinet/in.h included via net/route.h above.
-#define IPPROTO_L2TP		115
+// Copied from linux/netfilter/nf_nat.h
+// Including linux/netfilter/nf_nat.h here causes conflicts between linux/in.h
+// and netinet/in.h.
+#define NF_NAT_RANGE_MAP_IPS			(1 << 0)
+#define NF_NAT_RANGE_PROTO_SPECIFIED		(1 << 1)
+#define NF_NAT_RANGE_PROTO_RANDOM		(1 << 2)
+#define NF_NAT_RANGE_PERSISTENT			(1 << 3)
+#define NF_NAT_RANGE_PROTO_RANDOM_FULLY		(1 << 4)
+#define NF_NAT_RANGE_PROTO_OFFSET		(1 << 5)
+#define NF_NAT_RANGE_NETMAP			(1 << 6)
+#define NF_NAT_RANGE_PROTO_RANDOM_ALL		\
+	(NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PROTO_RANDOM_FULLY)
+#define NF_NAT_RANGE_MASK					\
+	(NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED |	\
+	 NF_NAT_RANGE_PROTO_RANDOM | NF_NAT_RANGE_PERSISTENT |	\
+	 NF_NAT_RANGE_PROTO_RANDOM_FULLY | NF_NAT_RANGE_PROTO_OFFSET | \
+	 NF_NAT_RANGE_NETMAP)
 
 // Copied from linux/hid.h.
 // Keep in sync with the size of the referenced fields.
@@ -603,6 +605,9 @@ ccflags="$@"
 		$2 ~ /^FSOPT_/ ||
 		$2 ~ /^WDIO[CFS]_/ ||
 		$2 ~ /^NFN/ ||
+		$2 !~ /^NFT_META_IIFTYPE/ &&
+		$2 ~ /^NFT_/ ||
+		$2 ~ /^NF_NAT_/ ||
 		$2 ~ /^XDP_/ ||
 		$2 ~ /^RWF_/ ||
 		$2 ~ /^(HDIO|WIN|SMART)_/ ||
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index c73cfe2f10..a5d3ff8df9 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -2127,6 +2127,60 @@ const (
 	NFNL_SUBSYS_QUEUE                           = 0x3
 	NFNL_SUBSYS_ULOG                            = 0x4
 	NFS_SUPER_MAGIC                             = 0x6969
+	NFT_CHAIN_FLAGS                             = 0x7
+	NFT_CHAIN_MAXNAMELEN                        = 0x100
+	NFT_CT_MAX                                  = 0x17
+	NFT_DATA_RESERVED_MASK                      = 0xffffff00
+	NFT_DATA_VALUE_MAXLEN                       = 0x40
+	NFT_EXTHDR_OP_MAX                           = 0x4
+	NFT_FIB_RESULT_MAX                          = 0x3
+	NFT_INNER_MASK                              = 0xf
+	NFT_LOGLEVEL_MAX                            = 0x8
+	NFT_NAME_MAXLEN                             = 0x100
+	NFT_NG_MAX                                  = 0x1
+	NFT_OBJECT_CONNLIMIT                        = 0x5
+	NFT_OBJECT_COUNTER                          = 0x1
+	NFT_OBJECT_CT_EXPECT                        = 0x9
+	NFT_OBJECT_CT_HELPER                        = 0x3
+	NFT_OBJECT_CT_TIMEOUT                       = 0x7
+	NFT_OBJECT_LIMIT                            = 0x4
+	NFT_OBJECT_MAX                              = 0xa
+	NFT_OBJECT_QUOTA                            = 0x2
+	NFT_OBJECT_SECMARK                          = 0x8
+	NFT_OBJECT_SYNPROXY                         = 0xa
+	NFT_OBJECT_TUNNEL                           = 0x6
+	NFT_OBJECT_UNSPEC                           = 0x0
+	NFT_OBJ_MAXNAMELEN                          = 0x100
+	NFT_OSF_MAXGENRELEN                         = 0x10
+	NFT_QUEUE_FLAG_BYPASS                       = 0x1
+	NFT_QUEUE_FLAG_CPU_FANOUT                   = 0x2
+	NFT_QUEUE_FLAG_MASK                         = 0x3
+	NFT_REG32_COUNT                             = 0x10
+	NFT_REG32_SIZE                              = 0x4
+	NFT_REG_MAX                                 = 0x4
+	NFT_REG_SIZE                                = 0x10
+	NFT_REJECT_ICMPX_MAX                        = 0x3
+	NFT_RT_MAX                                  = 0x4
+	NFT_SECMARK_CTX_MAXLEN                      = 0x100
+	NFT_SET_MAXNAMELEN                          = 0x100
+	NFT_SOCKET_MAX                              = 0x3
+	NFT_TABLE_F_MASK                            = 0x3
+	NFT_TABLE_MAXNAMELEN                        = 0x100
+	NFT_TRACETYPE_MAX                           = 0x3
+	NFT_TUNNEL_F_MASK                           = 0x7
+	NFT_TUNNEL_MAX                              = 0x1
+	NFT_TUNNEL_MODE_MAX                         = 0x2
+	NFT_USERDATA_MAXLEN                         = 0x100
+	NFT_XFRM_KEY_MAX                            = 0x6
+	NF_NAT_RANGE_MAP_IPS                        = 0x1
+	NF_NAT_RANGE_MASK                           = 0x7f
+	NF_NAT_RANGE_NETMAP                         = 0x40
+	NF_NAT_RANGE_PERSISTENT                     = 0x8
+	NF_NAT_RANGE_PROTO_OFFSET                   = 0x20
+	NF_NAT_RANGE_PROTO_RANDOM                   = 0x4
+	NF_NAT_RANGE_PROTO_RANDOM_ALL               = 0x14
+	NF_NAT_RANGE_PROTO_RANDOM_FULLY             = 0x10
+	NF_NAT_RANGE_PROTO_SPECIFIED                = 0x2
 	NILFS_SUPER_MAGIC                           = 0x3434
 	NL0                                         = 0x0
 	NL1                                         = 0x100
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
index a1d061597c..9dc42410b7 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
@@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) {
 var libc_unveil_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_unveil unveil "libc.so"
-
-
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
index 5b2a740977..0d3a0751cd 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
@@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) {
 var libc_unveil_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_unveil unveil "libc.so"
-
-
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
index f6eda1344a..c39f7776db 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
@@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) {
 var libc_unveil_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_unveil unveil "libc.so"
-
-
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
index 55df20ae9d..57571d072f 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go
@@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) {
 var libc_unveil_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_unveil unveil "libc.so"
-
-
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
index 8c1155cbc0..e62963e67e 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go
@@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) {
 var libc_unveil_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_unveil unveil "libc.so"
-
-
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
index 7cc80c58d9..00831354c8 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go
@@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) {
 var libc_unveil_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_unveil unveil "libc.so"
-
-
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
index 0688737f49..79029ed584 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go
@@ -2297,5 +2297,3 @@ func unveil(path *byte, flags *byte) (err error) {
 var libc_unveil_trampoline_addr uintptr
 
 //go:cgo_import_dynamic libc_unveil unveil "libc.so"
-
-
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 47dc579676..ffb8708ccf 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -194,6 +194,7 @@ func NewCallbackCDecl(fn interface{}) uintptr {
 //sys	GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW
 //sys	GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW
 //sys	SetEndOfFile(handle Handle) (err error)
+//sys	SetFileValidData(handle Handle, validDataLength int64) (err error)
 //sys	GetSystemTimeAsFileTime(time *Filetime)
 //sys	GetSystemTimePreciseAsFileTime(time *Filetime)
 //sys	GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff]
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 146a1f0196..e8791c82c3 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -342,6 +342,7 @@ var (
 	procSetDefaultDllDirectories                             = modkernel32.NewProc("SetDefaultDllDirectories")
 	procSetDllDirectoryW                                     = modkernel32.NewProc("SetDllDirectoryW")
 	procSetEndOfFile                                         = modkernel32.NewProc("SetEndOfFile")
+	procSetFileValidData                                     = modkernel32.NewProc("SetFileValidData")
 	procSetEnvironmentVariableW                              = modkernel32.NewProc("SetEnvironmentVariableW")
 	procSetErrorMode                                         = modkernel32.NewProc("SetErrorMode")
 	procSetEvent                                             = modkernel32.NewProc("SetEvent")
@@ -2988,6 +2989,14 @@ func SetEndOfFile(handle Handle) (err error) {
 	return
 }
 
+func SetFileValidData(handle Handle, validDataLength int64) (err error) {
+	r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0)
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
 func SetEnvironmentVariable(name *uint16, value *uint16) (err error) {
 	r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0)
 	if r1 == 0 {
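A sketch, under stated assumptions, of how the newly exposed SetFileValidData can be used to extend a file without the OS zero-filling the new region on first write; the Win32 call requires a writable handle and the SE_MANAGE_VOLUME privilege, so this is not a drop-in replacement for plain truncation.

//go:build windows

package winfile

import (
	"golang.org/x/sys/windows"
)

// preallocate extends path to size bytes and marks that range as valid data.
// Requires the SE_MANAGE_VOLUME privilege; otherwise SetFileValidData fails.
func preallocate(path string, size int64) error {
	p, err := windows.UTF16PtrFromString(path)
	if err != nil {
		return err
	}
	h, err := windows.CreateFile(p, windows.GENERIC_WRITE, 0, nil,
		windows.OPEN_ALWAYS, windows.FILE_ATTRIBUTE_NORMAL, 0)
	if err != nil {
		return err
	}
	defer windows.CloseHandle(h)
	// Grow the file first, then declare the whole range valid.
	if _, err := windows.Seek(h, size, 0); err != nil {
		return err
	}
	if err := windows.SetEndOfFile(h); err != nil {
		return err
	}
	return windows.SetFileValidData(h, size)
}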
diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go
index 5dfff4cb2c..947e83a5b5 100644
--- a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go
+++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC.
+// Copyright 2024 Google LLC.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
@@ -90,7 +90,9 @@ const apiId = "iamcredentials:v1"
 const apiName = "iamcredentials"
 const apiVersion = "v1"
 const basePath = "https://iamcredentials.googleapis.com/"
+const basePathTemplate = "https://iamcredentials.UNIVERSE_DOMAIN/"
 const mtlsBasePath = "https://iamcredentials.mtls.googleapis.com/"
+const defaultUniverseDomain = "googleapis.com"
 
 // OAuth2 scopes used by this API.
 const (
@@ -107,7 +109,9 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err
 	// NOTE: prepend, so we don't override user-specified scopes.
 	opts = append([]option.ClientOption{scopesOption}, opts...)
 	opts = append(opts, internaloption.WithDefaultEndpoint(basePath))
+	opts = append(opts, internaloption.WithDefaultEndpointTemplate(basePathTemplate))
 	opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath))
+	opts = append(opts, internaloption.WithDefaultUniverseDomain(defaultUniverseDomain))
 	client, endpoint, err := htransport.NewClient(ctx, opts...)
 	if err != nil {
 		return nil, err
diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go
index 3356fa97b8..285e6e04d3 100644
--- a/vendor/google.golang.org/api/internal/settings.go
+++ b/vendor/google.golang.org/api/internal/settings.go
@@ -27,6 +27,7 @@ const (
 type DialSettings struct {
 	Endpoint                      string
 	DefaultEndpoint               string
+	DefaultEndpointTemplate       string
 	DefaultMTLSEndpoint           string
 	Scopes                        []string
 	DefaultScopes                 []string
diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go
index 104a911327..8ecad3542b 100644
--- a/vendor/google.golang.org/api/internal/version.go
+++ b/vendor/google.golang.org/api/internal/version.go
@@ -5,4 +5,4 @@
 package internal
 
 // Version is the current tagged release of the library.
-const Version = "0.154.0"
+const Version = "0.156.0"
diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go
index 3fdee095c1..c15be9faa9 100644
--- a/vendor/google.golang.org/api/option/internaloption/internaloption.go
+++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go
@@ -22,10 +22,29 @@ func (o defaultEndpointOption) Apply(settings *internal.DialSettings) {
 // It should only be used internally by generated clients.
 //
 // This is similar to WithEndpoint, but allows us to determine whether the user has overridden the default endpoint.
+//
+// Deprecated: WithDefaultEndpoint does not support setting the universe domain.
+// Use WithDefaultEndpointTemplate and WithDefaultUniverseDomain to compose the
+// default endpoint instead.
 func WithDefaultEndpoint(url string) option.ClientOption {
 	return defaultEndpointOption(url)
 }
 
+type defaultEndpointTemplateOption string
+
+func (o defaultEndpointTemplateOption) Apply(settings *internal.DialSettings) {
+	settings.DefaultEndpointTemplate = string(o)
+}
+
+// WithDefaultEndpointTemplate provides a template for creating the endpoint
+// using a universe domain. See also WithDefaultUniverseDomain and
+// option.WithUniverseDomain.
+//
+// It should only be used internally by generated clients.
+func WithDefaultEndpointTemplate(url string) option.ClientOption {
+	return defaultEndpointTemplateOption(url)
+}
+
 type defaultMTLSEndpointOption string
 
 func (o defaultMTLSEndpointOption) Apply(settings *internal.DialSettings) {
diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json
index 2c5bfb5b30..2500369220 100644
--- a/vendor/google.golang.org/api/storage/v1/storage-api.json
+++ b/vendor/google.golang.org/api/storage/v1/storage-api.json
@@ -33,7 +33,7 @@
       "location": "me-central2"
     }
   ],
-  "etag": "\"3131373432363238303039393730353234383930\"",
+  "etag": "\"3136323232353032373039383637313835303036\"",
   "icons": {
     "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png",
     "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png"
@@ -99,7 +99,7 @@
   },
   "protocol": "rest",
   "resources": {
-    "anywhereCache": {
+    "anywhereCaches": {
       "methods": {
         "disable": {
           "description": "Disables an Anywhere Cache instance.",
@@ -117,7 +117,7 @@
               "type": "string"
             },
             "bucket": {
-              "description": "Name of the partent bucket",
+              "description": "Name of the parent bucket.",
               "location": "path",
               "required": true,
               "type": "string"
@@ -149,7 +149,7 @@
               "type": "string"
             },
             "bucket": {
-              "description": "Name of the partent bucket",
+              "description": "Name of the parent bucket.",
               "location": "path",
               "required": true,
               "type": "string"
@@ -176,7 +176,7 @@
           ],
           "parameters": {
             "bucket": {
-              "description": "Name of the partent bucket",
+              "description": "Name of the parent bucket.",
               "location": "path",
               "required": true,
               "type": "string"
@@ -204,13 +204,13 @@
           ],
           "parameters": {
             "bucket": {
-              "description": "Name of the partent bucket",
+              "description": "Name of the parent bucket.",
               "location": "path",
               "required": true,
               "type": "string"
             },
             "pageSize": {
-              "description": "Maximum number of items return in a single page of responses. Maximum 1000.",
+              "description": "Maximum number of items to return in a single page of responses. Maximum 1000.",
               "format": "int32",
               "location": "query",
               "minimum": "0",
@@ -250,7 +250,7 @@
               "type": "string"
             },
             "bucket": {
-              "description": "Name of the partent bucket",
+              "description": "Name of the parent bucket.",
               "location": "path",
               "required": true,
               "type": "string"
@@ -282,7 +282,7 @@
               "type": "string"
             },
             "bucket": {
-              "description": "Name of the partent bucket",
+              "description": "Name of the parent bucket.",
               "location": "path",
               "required": true,
               "type": "string"
@@ -314,7 +314,7 @@
               "type": "string"
             },
             "bucket": {
-              "description": "Name of the partent bucket",
+              "description": "Name of the parent bucket.",
               "location": "path",
               "required": true,
               "type": "string"
@@ -1387,6 +1387,240 @@
         }
       }
     },
+    "folders": {
+      "methods": {
+        "delete": {
+          "description": "Permanently deletes a folder. Only applicable to buckets with hierarchical namespace enabled.",
+          "httpMethod": "DELETE",
+          "id": "storage.folders.delete",
+          "parameterOrder": [
+            "bucket",
+            "folder"
+          ],
+          "parameters": {
+            "bucket": {
+              "description": "Name of the bucket in which the folder resides.",
+              "location": "path",
+              "required": true,
+              "type": "string"
+            },
+            "folder": {
+              "description": "Name of a folder.",
+              "location": "path",
+              "required": true,
+              "type": "string"
+            },
+            "ifMetagenerationMatch": {
+              "description": "If set, only deletes the folder if its metageneration matches this value.",
+              "format": "int64",
+              "location": "query",
+              "type": "string"
+            },
+            "ifMetagenerationNotMatch": {
+              "description": "If set, only deletes the folder if its metageneration does not match this value.",
+              "format": "int64",
+              "location": "query",
+              "type": "string"
+            }
+          },
+          "path": "b/{bucket}/folders/{folder}",
+          "scopes": [
+            "https://www.googleapis.com/auth/cloud-platform",
+            "https://www.googleapis.com/auth/devstorage.full_control",
+            "https://www.googleapis.com/auth/devstorage.read_write"
+          ]
+        },
+        "get": {
+          "description": "Returns metadata for the specified folder. Only applicable to buckets with hierarchical namespace enabled.",
+          "httpMethod": "GET",
+          "id": "storage.folders.get",
+          "parameterOrder": [
+            "bucket",
+            "folder"
+          ],
+          "parameters": {
+            "bucket": {
+              "description": "Name of the bucket in which the folder resides.",
+              "location": "path",
+              "required": true,
+              "type": "string"
+            },
+            "folder": {
+              "description": "Name of a folder.",
+              "location": "path",
+              "required": true,
+              "type": "string"
+            },
+            "ifMetagenerationMatch": {
+              "description": "Makes the return of the folder metadata conditional on whether the folder's current metageneration matches the given value.",
+              "format": "int64",
+              "location": "query",
+              "type": "string"
+            },
+            "ifMetagenerationNotMatch": {
+              "description": "Makes the return of the folder metadata conditional on whether the folder's current metageneration does not match the given value.",
+              "format": "int64",
+              "location": "query",
+              "type": "string"
+            }
+          },
+          "path": "b/{bucket}/folders/{folder}",
+          "response": {
+            "$ref": "Folder"
+          },
+          "scopes": [
+            "https://www.googleapis.com/auth/cloud-platform",
+            "https://www.googleapis.com/auth/cloud-platform.read-only",
+            "https://www.googleapis.com/auth/devstorage.full_control",
+            "https://www.googleapis.com/auth/devstorage.read_only",
+            "https://www.googleapis.com/auth/devstorage.read_write"
+          ]
+        },
+        "insert": {
+          "description": "Creates a new folder. Only applicable to buckets with hierarchical namespace enabled.",
+          "httpMethod": "POST",
+          "id": "storage.folders.insert",
+          "parameterOrder": [
+            "bucket"
+          ],
+          "parameters": {
+            "bucket": {
+              "description": "Name of the bucket in which the folder resides.",
+              "location": "path",
+              "required": true,
+              "type": "string"
+            },
+            "recursive": {
+              "description": "If true, any parent folder which doesn’t exist will be created automatically.",
+              "location": "query",
+              "type": "boolean"
+            }
+          },
+          "path": "b/{bucket}/folders",
+          "request": {
+            "$ref": "Folder"
+          },
+          "response": {
+            "$ref": "Folder"
+          },
+          "scopes": [
+            "https://www.googleapis.com/auth/cloud-platform",
+            "https://www.googleapis.com/auth/devstorage.full_control",
+            "https://www.googleapis.com/auth/devstorage.read_write"
+          ]
+        },
+        "list": {
+          "description": "Retrieves a list of folders matching the criteria. Only applicable to buckets with hierarchical namespace enabled.",
+          "httpMethod": "GET",
+          "id": "storage.folders.list",
+          "parameterOrder": [
+            "bucket"
+          ],
+          "parameters": {
+            "bucket": {
+              "description": "Name of the bucket in which to look for folders.",
+              "location": "path",
+              "required": true,
+              "type": "string"
+            },
+            "delimiter": {
+              "description": "Returns results in a directory-like mode. The only supported value is '/'. If set, items will only contain folders that either exactly match the prefix, or are one level below the prefix.",
+              "location": "query",
+              "type": "string"
+            },
+            "endOffset": {
+              "description": "Filter results to folders whose names are lexicographically before endOffset. If startOffset is also set, the folders listed will have names between startOffset (inclusive) and endOffset (exclusive).",
+              "location": "query",
+              "type": "string"
+            },
+            "pageSize": {
+              "description": "Maximum number of items to return in a single page of responses.",
+              "format": "int32",
+              "location": "query",
+              "minimum": "0",
+              "type": "integer"
+            },
+            "pageToken": {
+              "description": "A previously-returned page token representing part of the larger set of results to view.",
+              "location": "query",
+              "type": "string"
+            },
+            "prefix": {
+              "description": "Filter results to folders whose paths begin with this prefix. If set, the value must either be an empty string or end with a '/'.",
+              "location": "query",
+              "type": "string"
+            },
+            "startOffset": {
+              "description": "Filter results to folders whose names are lexicographically equal to or after startOffset. If endOffset is also set, the folders listed will have names between startOffset (inclusive) and endOffset (exclusive).",
+              "location": "query",
+              "type": "string"
+            }
+          },
+          "path": "b/{bucket}/folders",
+          "response": {
+            "$ref": "Folders"
+          },
+          "scopes": [
+            "https://www.googleapis.com/auth/cloud-platform",
+            "https://www.googleapis.com/auth/cloud-platform.read-only",
+            "https://www.googleapis.com/auth/devstorage.full_control",
+            "https://www.googleapis.com/auth/devstorage.read_only",
+            "https://www.googleapis.com/auth/devstorage.read_write"
+          ]
+        },
+        "rename": {
+          "description": "Renames a source folder to a destination folder. Only applicable to buckets with hierarchical namespace enabled.",
+          "httpMethod": "POST",
+          "id": "storage.folders.rename",
+          "parameterOrder": [
+            "bucket",
+            "sourceFolder",
+            "destinationFolder"
+          ],
+          "parameters": {
+            "bucket": {
+              "description": "Name of the bucket in which the folders are in.",
+              "location": "path",
+              "required": true,
+              "type": "string"
+            },
+            "destinationFolder": {
+              "description": "Name of the destination folder.",
+              "location": "path",
+              "required": true,
+              "type": "string"
+            },
+            "ifSourceMetagenerationMatch": {
+              "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
+              "format": "int64",
+              "location": "query",
+              "type": "string"
+            },
+            "ifSourceMetagenerationNotMatch": {
+              "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
+              "format": "int64",
+              "location": "query",
+              "type": "string"
+            },
+            "sourceFolder": {
+              "description": "Name of the source folder.",
+              "location": "path",
+              "required": true,
+              "type": "string"
+            }
+          },
+          "path": "b/{bucket}/folders/{sourceFolder}/renameTo/folders/{destinationFolder}",
+          "response": {
+            "$ref": "GoogleLongrunningOperation"
+          },
+          "scopes": [
+            "https://www.googleapis.com/auth/cloud-platform",
+            "https://www.googleapis.com/auth/devstorage.full_control",
+            "https://www.googleapis.com/auth/devstorage.read_write"
+          ]
+        }
+      }
+    },
     "managedFolders": {
       "methods": {
         "delete": {
@@ -1565,7 +1799,7 @@
               "type": "string"
             },
             "pageSize": {
-              "description": "Maximum number of items return in a single page of responses.",
+              "description": "Maximum number of items to return in a single page of responses.",
               "format": "int32",
               "location": "query",
               "minimum": "0",
@@ -3806,7 +4040,7 @@
       }
     }
   },
-  "revision": "20231202",
+  "revision": "20240105",
   "rootUrl": "https://storage.googleapis.com/",
   "schemas": {
     "AnywhereCache": {
@@ -3860,6 +4094,10 @@
           "description": "The modification time of the cache instance metadata in RFC 3339 format.",
           "format": "date-time",
           "type": "string"
+        },
+        "zone": {
+          "description": "The zone in which the cache instance is running. For example, us-central1-a.",
+          "type": "string"
         }
       },
       "type": "object"
@@ -4010,6 +4248,16 @@
           "description": "HTTP 1.1 Entity tag for the bucket.",
           "type": "string"
         },
+        "hierarchicalNamespace": {
+          "description": "The bucket's hierarchical namespace configuration.",
+          "properties": {
+            "enabled": {
+              "description": "When set to true, hierarchical namespace is enabled for this bucket.",
+              "type": "boolean"
+            }
+          },
+          "type": "object"
+        },
         "iamConfiguration": {
           "description": "The bucket's IAM configuration.",
           "properties": {
@@ -4597,6 +4845,90 @@
       },
       "type": "object"
     },
+    "Folder": {
+      "description": "A folder. Only available in buckets with hierarchical namespace enabled.",
+      "id": "Folder",
+      "properties": {
+        "bucket": {
+          "description": "The name of the bucket containing this folder.",
+          "type": "string"
+        },
+        "id": {
+          "description": "The ID of the folder, including the bucket name, folder name.",
+          "type": "string"
+        },
+        "kind": {
+          "default": "storage#folder",
+          "description": "The kind of item this is. For folders, this is always storage#folder.",
+          "type": "string"
+        },
+        "metadata": {
+          "additionalProperties": {
+            "description": "An individual metadata entry.",
+            "type": "string"
+          },
+          "description": "User-provided metadata, in key/value pairs.",
+          "type": "object"
+        },
+        "metageneration": {
+          "description": "The version of the metadata for this folder. Used for preconditions and for detecting changes in metadata.",
+          "format": "int64",
+          "type": "string"
+        },
+        "name": {
+          "description": "The name of the folder. Required if not specified by URL parameter.",
+          "type": "string"
+        },
+        "pendingRenameInfo": {
+          "description": "Only present if the folder is part of an ongoing rename folder operation. Contains information which can be used to query the operation status.",
+          "properties": {
+            "operationId": {
+              "description": "The ID of the rename folder operation.",
+              "type": "string"
+            }
+          },
+          "type": "object"
+        },
+        "selfLink": {
+          "description": "The link to this folder.",
+          "type": "string"
+        },
+        "timeCreated": {
+          "description": "The creation time of the folder in RFC 3339 format.",
+          "format": "date-time",
+          "type": "string"
+        },
+        "updated": {
+          "description": "The modification time of the folder metadata in RFC 3339 format.",
+          "format": "date-time",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "Folders": {
+      "description": "A list of folders.",
+      "id": "Folders",
+      "properties": {
+        "items": {
+          "description": "The list of items.",
+          "items": {
+            "$ref": "Folder"
+          },
+          "type": "array"
+        },
+        "kind": {
+          "default": "storage#folders",
+          "description": "The kind of item this is. For lists of folders, this is always storage#folders.",
+          "type": "string"
+        },
+        "nextPageToken": {
+          "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.",
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
     "GoogleLongrunningListOperationsResponse": {
       "description": "The response message for storage.buckets.operations.list.",
       "id": "GoogleLongrunningListOperationsResponse",
diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go
index c4331c04de..c34ca98c4d 100644
--- a/vendor/google.golang.org/api/storage/v1/storage-gen.go
+++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC.
+// Copyright 2024 Google LLC.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
@@ -98,7 +98,9 @@ const apiId = "storage:v1"
 const apiName = "storage"
 const apiVersion = "v1"
 const basePath = "https://storage.googleapis.com/storage/v1/"
+const basePathTemplate = "https://storage.UNIVERSE_DOMAIN/storage/v1/"
 const mtlsBasePath = "https://storage.mtls.googleapis.com/storage/v1/"
+const defaultUniverseDomain = "googleapis.com"
 
 // OAuth2 scopes used by this API.
 const (
@@ -130,7 +132,9 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err
 	// NOTE: prepend, so we don't override user-specified scopes.
 	opts = append([]option.ClientOption{scopesOption}, opts...)
 	opts = append(opts, internaloption.WithDefaultEndpoint(basePath))
+	opts = append(opts, internaloption.WithDefaultEndpointTemplate(basePathTemplate))
 	opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath))
+	opts = append(opts, internaloption.WithDefaultUniverseDomain(defaultUniverseDomain))
 	client, endpoint, err := htransport.NewClient(ctx, opts...)
 	if err != nil {
 		return nil, err
@@ -155,11 +159,12 @@ func New(client *http.Client) (*Service, error) {
 		return nil, errors.New("client is nil")
 	}
 	s := &Service{client: client, BasePath: basePath}
-	s.AnywhereCache = NewAnywhereCacheService(s)
+	s.AnywhereCaches = NewAnywhereCachesService(s)
 	s.BucketAccessControls = NewBucketAccessControlsService(s)
 	s.Buckets = NewBucketsService(s)
 	s.Channels = NewChannelsService(s)
 	s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s)
+	s.Folders = NewFoldersService(s)
 	s.ManagedFolders = NewManagedFoldersService(s)
 	s.Notifications = NewNotificationsService(s)
 	s.ObjectAccessControls = NewObjectAccessControlsService(s)
@@ -174,7 +179,7 @@ type Service struct {
 	BasePath  string // API endpoint base URL
 	UserAgent string // optional additional User-Agent fragment
 
-	AnywhereCache *AnywhereCacheService
+	AnywhereCaches *AnywhereCachesService
 
 	BucketAccessControls *BucketAccessControlsService
 
@@ -184,6 +189,8 @@ type Service struct {
 
 	DefaultObjectAccessControls *DefaultObjectAccessControlsService
 
+	Folders *FoldersService
+
 	ManagedFolders *ManagedFoldersService
 
 	Notifications *NotificationsService
@@ -204,12 +211,12 @@ func (s *Service) userAgent() string {
 	return googleapi.UserAgent + " " + s.UserAgent
 }
 
-func NewAnywhereCacheService(s *Service) *AnywhereCacheService {
-	rs := &AnywhereCacheService{s: s}
+func NewAnywhereCachesService(s *Service) *AnywhereCachesService {
+	rs := &AnywhereCachesService{s: s}
 	return rs
 }
 
-type AnywhereCacheService struct {
+type AnywhereCachesService struct {
 	s *Service
 }
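The Anywhere Cache surface has been pluralized: AnywhereCacheService and every AnywhereCache*Call type become AnywhereCachesService and AnywhereCaches*Call, with an unchanged method set, so callers only need to rename identifiers. A minimal, self-contained sketch of the renamed accessor; the bucket name is a placeholder, and the Items field on the AnywhereCaches list response is assumed to follow the usual generated list shape:

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	// NewService picks up Application Default Credentials and the default scopes.
	svc, err := storage.NewService(ctx)
	if err != nil {
		log.Fatal(err)
	}
	// svc.AnywhereCache is now svc.AnywhereCaches; List and Pages behave as before.
	err = svc.AnywhereCaches.List("example-bucket").Pages(ctx, func(page *storage.AnywhereCaches) error {
		for _, cache := range page.Items {
			fmt.Println(cache.Zone) // Zone is the newly added field on AnywhereCache.
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}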
 
@@ -249,6 +256,15 @@ type DefaultObjectAccessControlsService struct {
 	s *Service
 }
 
+func NewFoldersService(s *Service) *FoldersService {
+	rs := &FoldersService{s: s}
+	return rs
+}
+
+type FoldersService struct {
+	s *Service
+}
+
 func NewManagedFoldersService(s *Service) *ManagedFoldersService {
 	rs := &ManagedFoldersService{s: s}
 	return rs
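New now wires a FoldersService onto the Service struct next to ManagedFolders, so the folder calls are reached through svc.Folders. A pagination sketch that reuses svc and ctx from the sketch above; the bucket name and prefix are placeholders, the FoldersListCall parameters used here (Prefix, PageSize, PageToken) are defined further down in this file, and its Do method is assumed to return the *Folders list type, as that type's embedded ServerResponse indicates:

	// Walk every folder under a prefix, one page at a time, following
	// NextPageToken from the Folders response.
	call := svc.Folders.List("example-hns-bucket").Prefix("reports/").PageSize(100)
	for {
		page, err := call.Context(ctx).Do()
		if err != nil {
			log.Fatal(err)
		}
		for _, f := range page.Items {
			fmt.Println(f.Name)
		}
		if page.NextPageToken == "" {
			break
		}
		call.PageToken(page.NextPageToken)
	}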
@@ -367,6 +383,10 @@ type AnywhereCache struct {
 	// RFC 3339 format.
 	UpdateTime string `json:"updateTime,omitempty"`
 
+	// Zone: The zone in which the cache instance is running. For example,
+	// us-central1-a.
+	Zone string `json:"zone,omitempty"`
+
 	// ServerResponse contains the HTTP response code and headers from the
 	// server.
 	googleapi.ServerResponse `json:"-"`
@@ -481,6 +501,10 @@ type Bucket struct {
 	// Etag: HTTP 1.1 Entity tag for the bucket.
 	Etag string `json:"etag,omitempty"`
 
+	// HierarchicalNamespace: The bucket's hierarchical namespace
+	// configuration.
+	HierarchicalNamespace *BucketHierarchicalNamespace `json:"hierarchicalNamespace,omitempty"`
+
 	// IamConfiguration: The bucket's IAM configuration.
 	IamConfiguration *BucketIamConfiguration `json:"iamConfiguration,omitempty"`
 
@@ -781,6 +805,36 @@ func (s *BucketEncryption) MarshalJSON() ([]byte, error) {
 	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
 }
 
+// BucketHierarchicalNamespace: The bucket's hierarchical namespace
+// configuration.
+type BucketHierarchicalNamespace struct {
+	// Enabled: When set to true, hierarchical namespace is enabled for this
+	// bucket.
+	Enabled bool `json:"enabled,omitempty"`
+
+	// ForceSendFields is a list of field names (e.g. "Enabled") to
+	// unconditionally include in API requests. By default, fields with
+	// empty or default values are omitted from API requests. However, any
+	// non-pointer, non-interface field appearing in ForceSendFields will be
+	// sent to the server regardless of whether the field is empty or not.
+	// This may be used to include empty fields in Patch requests.
+	ForceSendFields []string `json:"-"`
+
+	// NullFields is a list of field names (e.g. "Enabled") to include in
+	// API requests with the JSON null value. By default, fields with empty
+	// values are omitted from API requests. However, any field with an
+	// empty value appearing in NullFields will be sent to the server as
+	// null. It is an error if a field in this list has a non-empty value.
+	// This may be used to include null fields in Patch requests.
+	NullFields []string `json:"-"`
+}
+
+func (s *BucketHierarchicalNamespace) MarshalJSON() ([]byte, error) {
+	type NoMethod BucketHierarchicalNamespace
+	raw := NoMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
 // BucketIamConfiguration: The bucket's IAM configuration.
 type BucketIamConfiguration struct {
 	// BucketPolicyOnly: The bucket's uniform bucket-level access
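BucketHierarchicalNamespace is the switch that makes the folder resources below available on a bucket, and it is set when the bucket is created. A hedged sketch that reuses svc and ctx from the earlier sketch and goes through the existing Buckets.Insert call; the project and bucket names are placeholders, and uniform bucket-level access is enabled alongside it because hierarchical-namespace buckets require it:

	// Create a bucket with hierarchical namespace enabled so that
	// svc.Folders can operate on it.
	hnsBucket := &storage.Bucket{
		Name:                  "example-hns-bucket",
		HierarchicalNamespace: &storage.BucketHierarchicalNamespace{Enabled: true},
		IamConfiguration: &storage.BucketIamConfiguration{
			UniformBucketLevelAccess: &storage.BucketIamConfigurationUniformBucketLevelAccess{
				Enabled: true,
			},
		},
	}
	if _, err := svc.Buckets.Insert("example-project", hnsBucket).Context(ctx).Do(); err != nil {
		log.Fatal(err)
	}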
@@ -1793,6 +1847,143 @@ func (s *Expr) MarshalJSON() ([]byte, error) {
 	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
 }
 
+// Folder: A folder. Only available in buckets with hierarchical
+// namespace enabled.
+type Folder struct {
+	// Bucket: The name of the bucket containing this folder.
+	Bucket string `json:"bucket,omitempty"`
+
+	// Id: The ID of the folder, including the bucket name, folder name.
+	Id string `json:"id,omitempty"`
+
+	// Kind: The kind of item this is. For folders, this is always
+	// storage#folder.
+	Kind string `json:"kind,omitempty"`
+
+	// Metadata: User-provided metadata, in key/value pairs.
+	Metadata map[string]string `json:"metadata,omitempty"`
+
+	// Metageneration: The version of the metadata for this folder. Used for
+	// preconditions and for detecting changes in metadata.
+	Metageneration int64 `json:"metageneration,omitempty,string"`
+
+	// Name: The name of the folder. Required if not specified by URL
+	// parameter.
+	Name string `json:"name,omitempty"`
+
+	// PendingRenameInfo: Only present if the folder is part of an ongoing
+	// rename folder operation. Contains information which can be used to
+	// query the operation status.
+	PendingRenameInfo *FolderPendingRenameInfo `json:"pendingRenameInfo,omitempty"`
+
+	// SelfLink: The link to this folder.
+	SelfLink string `json:"selfLink,omitempty"`
+
+	// TimeCreated: The creation time of the folder in RFC 3339 format.
+	TimeCreated string `json:"timeCreated,omitempty"`
+
+	// Updated: The modification time of the folder metadata in RFC 3339
+	// format.
+	Updated string `json:"updated,omitempty"`
+
+	// ServerResponse contains the HTTP response code and headers from the
+	// server.
+	googleapi.ServerResponse `json:"-"`
+
+	// ForceSendFields is a list of field names (e.g. "Bucket") to
+	// unconditionally include in API requests. By default, fields with
+	// empty or default values are omitted from API requests. However, any
+	// non-pointer, non-interface field appearing in ForceSendFields will be
+	// sent to the server regardless of whether the field is empty or not.
+	// This may be used to include empty fields in Patch requests.
+	ForceSendFields []string `json:"-"`
+
+	// NullFields is a list of field names (e.g. "Bucket") to include in API
+	// requests with the JSON null value. By default, fields with empty
+	// values are omitted from API requests. However, any field with an
+	// empty value appearing in NullFields will be sent to the server as
+	// null. It is an error if a field in this list has a non-empty value.
+	// This may be used to include null fields in Patch requests.
+	NullFields []string `json:"-"`
+}
+
+func (s *Folder) MarshalJSON() ([]byte, error) {
+	type NoMethod Folder
+	raw := NoMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// FolderPendingRenameInfo: Only present if the folder is part of an
+// ongoing rename folder operation. Contains information which can be
+// used to query the operation status.
+type FolderPendingRenameInfo struct {
+	// OperationId: The ID of the rename folder operation.
+	OperationId string `json:"operationId,omitempty"`
+
+	// ForceSendFields is a list of field names (e.g. "OperationId") to
+	// unconditionally include in API requests. By default, fields with
+	// empty or default values are omitted from API requests. However, any
+	// non-pointer, non-interface field appearing in ForceSendFields will be
+	// sent to the server regardless of whether the field is empty or not.
+	// This may be used to include empty fields in Patch requests.
+	ForceSendFields []string `json:"-"`
+
+	// NullFields is a list of field names (e.g. "OperationId") to include
+	// in API requests with the JSON null value. By default, fields with
+	// empty values are omitted from API requests. However, any field with
+	// an empty value appearing in NullFields will be sent to the server as
+	// null. It is an error if a field in this list has a non-empty value.
+	// This may be used to include null fields in Patch requests.
+	NullFields []string `json:"-"`
+}
+
+func (s *FolderPendingRenameInfo) MarshalJSON() ([]byte, error) {
+	type NoMethod FolderPendingRenameInfo
+	raw := NoMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
+// Folders: A list of folders.
+type Folders struct {
+	// Items: The list of items.
+	Items []*Folder `json:"items,omitempty"`
+
+	// Kind: The kind of item this is. For lists of folders, this is always
+	// storage#folders.
+	Kind string `json:"kind,omitempty"`
+
+	// NextPageToken: The continuation token, used to page through large
+	// result sets. Provide this value in a subsequent request to return the
+	// next page of results.
+	NextPageToken string `json:"nextPageToken,omitempty"`
+
+	// ServerResponse contains the HTTP response code and headers from the
+	// server.
+	googleapi.ServerResponse `json:"-"`
+
+	// ForceSendFields is a list of field names (e.g. "Items") to
+	// unconditionally include in API requests. By default, fields with
+	// empty or default values are omitted from API requests. However, any
+	// non-pointer, non-interface field appearing in ForceSendFields will be
+	// sent to the server regardless of whether the field is empty or not.
+	// This may be used to include empty fields in Patch requests.
+	ForceSendFields []string `json:"-"`
+
+	// NullFields is a list of field names (e.g. "Items") to include in API
+	// requests with the JSON null value. By default, fields with empty
+	// values are omitted from API requests. However, any field with an
+	// empty value appearing in NullFields will be sent to the server as
+	// null. It is an error if a field in this list has a non-empty value.
+	// This may be used to include null fields in Patch requests.
+	NullFields []string `json:"-"`
+}
+
+func (s *Folders) MarshalJSON() ([]byte, error) {
+	type NoMethod Folders
+	raw := NoMethod(*s)
+	return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields)
+}
+
 // GoogleLongrunningListOperationsResponse: The response message for
 // storage.buckets.operations.list.
 type GoogleLongrunningListOperationsResponse struct {
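The Folder, FolderPendingRenameInfo and Folders structs mirror the discovery schemas added above; note the ,string option on Metageneration, which keeps the int64 as a decimal string on the wire, matching the schema's format "int64" with type "string". A small sketch of the resulting encoding with made-up values, assuming encoding/json is imported:

	// Marshal a Folder value to see its wire form; Metageneration comes out
	// as the quoted string "7", not a JSON number.
	f := &storage.Folder{
		Bucket:         "example-hns-bucket",
		Name:           "reports/2024/",
		Metageneration: 7,
	}
	b, err := json.Marshal(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // {"bucket":"example-hns-bucket","metageneration":"7","name":"reports/2024/"}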
@@ -3067,7 +3258,7 @@ func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) {
 
 // method id "storage.anywhereCaches.disable":
 
-type AnywhereCacheDisableCall struct {
+type AnywhereCachesDisableCall struct {
 	s               *Service
 	bucket          string
 	anywhereCacheId string
@@ -3079,9 +3270,9 @@ type AnywhereCacheDisableCall struct {
 // Disable: Disables an Anywhere Cache instance.
 //
 // - anywhereCacheId: The ID of requested Anywhere Cache instance.
-// - bucket: Name of the partent bucket.
-func (r *AnywhereCacheService) Disable(bucket string, anywhereCacheId string) *AnywhereCacheDisableCall {
-	c := &AnywhereCacheDisableCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - bucket: Name of the parent bucket.
+func (r *AnywhereCachesService) Disable(bucket string, anywhereCacheId string) *AnywhereCachesDisableCall {
+	c := &AnywhereCachesDisableCall{s: r.s, urlParams_: make(gensupport.URLParams)}
 	c.bucket = bucket
 	c.anywhereCacheId = anywhereCacheId
 	return c
@@ -3090,7 +3281,7 @@ func (r *AnywhereCacheService) Disable(bucket string, anywhereCacheId string) *A
 // Fields allows partial responses to be retrieved. See
 // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
 // for more information.
-func (c *AnywhereCacheDisableCall) Fields(s ...googleapi.Field) *AnywhereCacheDisableCall {
+func (c *AnywhereCachesDisableCall) Fields(s ...googleapi.Field) *AnywhereCachesDisableCall {
 	c.urlParams_.Set("fields", googleapi.CombineFields(s))
 	return c
 }
@@ -3098,21 +3289,21 @@ func (c *AnywhereCacheDisableCall) Fields(s ...googleapi.Field) *AnywhereCacheDi
 // Context sets the context to be used in this call's Do method. Any
 // pending HTTP request will be aborted if the provided context is
 // canceled.
-func (c *AnywhereCacheDisableCall) Context(ctx context.Context) *AnywhereCacheDisableCall {
+func (c *AnywhereCachesDisableCall) Context(ctx context.Context) *AnywhereCachesDisableCall {
 	c.ctx_ = ctx
 	return c
 }
 
 // Header returns an http.Header that can be modified by the caller to
 // add HTTP headers to the request.
-func (c *AnywhereCacheDisableCall) Header() http.Header {
+func (c *AnywhereCachesDisableCall) Header() http.Header {
 	if c.header_ == nil {
 		c.header_ = make(http.Header)
 	}
 	return c.header_
 }
 
-func (c *AnywhereCacheDisableCall) doRequest(alt string) (*http.Response, error) {
+func (c *AnywhereCachesDisableCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
 	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
 	for k, v := range c.header_ {
@@ -3143,7 +3334,7 @@ func (c *AnywhereCacheDisableCall) doRequest(alt string) (*http.Response, error)
 // at all) in error.(*googleapi.Error).Header. Use
 // googleapi.IsNotModified to check whether the returned error was
 // because http.StatusNotModified was returned.
-func (c *AnywhereCacheDisableCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) {
+func (c *AnywhereCachesDisableCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) {
 	gensupport.SetOptions(c.urlParams_, opts...)
 	res, err := c.doRequest("json")
 	if res != nil && res.StatusCode == http.StatusNotModified {
@@ -3189,7 +3380,7 @@ func (c *AnywhereCacheDisableCall) Do(opts ...googleapi.CallOption) (*AnywhereCa
 	//       "type": "string"
 	//     },
 	//     "bucket": {
-	//       "description": "Name of the partent bucket",
+	//       "description": "Name of the parent bucket.",
 	//       "location": "path",
 	//       "required": true,
 	//       "type": "string"
@@ -3210,7 +3401,7 @@ func (c *AnywhereCacheDisableCall) Do(opts ...googleapi.CallOption) (*AnywhereCa
 
 // method id "storage.anywhereCaches.get":
 
-type AnywhereCacheGetCall struct {
+type AnywhereCachesGetCall struct {
 	s               *Service
 	bucket          string
 	anywhereCacheId string
@@ -3223,9 +3414,9 @@ type AnywhereCacheGetCall struct {
 // Get: Returns the metadata of an Anywhere Cache instance.
 //
 // - anywhereCacheId: The ID of requested Anywhere Cache instance.
-// - bucket: Name of the partent bucket.
-func (r *AnywhereCacheService) Get(bucket string, anywhereCacheId string) *AnywhereCacheGetCall {
-	c := &AnywhereCacheGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - bucket: Name of the parent bucket.
+func (r *AnywhereCachesService) Get(bucket string, anywhereCacheId string) *AnywhereCachesGetCall {
+	c := &AnywhereCachesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
 	c.bucket = bucket
 	c.anywhereCacheId = anywhereCacheId
 	return c
@@ -3234,7 +3425,7 @@ func (r *AnywhereCacheService) Get(bucket string, anywhereCacheId string) *Anywh
 // Fields allows partial responses to be retrieved. See
 // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
 // for more information.
-func (c *AnywhereCacheGetCall) Fields(s ...googleapi.Field) *AnywhereCacheGetCall {
+func (c *AnywhereCachesGetCall) Fields(s ...googleapi.Field) *AnywhereCachesGetCall {
 	c.urlParams_.Set("fields", googleapi.CombineFields(s))
 	return c
 }
@@ -3244,7 +3435,7 @@ func (c *AnywhereCacheGetCall) Fields(s ...googleapi.Field) *AnywhereCacheGetCal
 // getting updates only after the object has changed since the last
 // request. Use googleapi.IsNotModified to check whether the response
 // error from Do is the result of In-None-Match.
-func (c *AnywhereCacheGetCall) IfNoneMatch(entityTag string) *AnywhereCacheGetCall {
+func (c *AnywhereCachesGetCall) IfNoneMatch(entityTag string) *AnywhereCachesGetCall {
 	c.ifNoneMatch_ = entityTag
 	return c
 }
@@ -3252,21 +3443,21 @@ func (c *AnywhereCacheGetCall) IfNoneMatch(entityTag string) *AnywhereCacheGetCa
 // Context sets the context to be used in this call's Do method. Any
 // pending HTTP request will be aborted if the provided context is
 // canceled.
-func (c *AnywhereCacheGetCall) Context(ctx context.Context) *AnywhereCacheGetCall {
+func (c *AnywhereCachesGetCall) Context(ctx context.Context) *AnywhereCachesGetCall {
 	c.ctx_ = ctx
 	return c
 }
 
 // Header returns an http.Header that can be modified by the caller to
 // add HTTP headers to the request.
-func (c *AnywhereCacheGetCall) Header() http.Header {
+func (c *AnywhereCachesGetCall) Header() http.Header {
 	if c.header_ == nil {
 		c.header_ = make(http.Header)
 	}
 	return c.header_
 }
 
-func (c *AnywhereCacheGetCall) doRequest(alt string) (*http.Response, error) {
+func (c *AnywhereCachesGetCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
 	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
 	for k, v := range c.header_ {
@@ -3300,7 +3491,7 @@ func (c *AnywhereCacheGetCall) doRequest(alt string) (*http.Response, error) {
 // at all) in error.(*googleapi.Error).Header. Use
 // googleapi.IsNotModified to check whether the returned error was
 // because http.StatusNotModified was returned.
-func (c *AnywhereCacheGetCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) {
+func (c *AnywhereCachesGetCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) {
 	gensupport.SetOptions(c.urlParams_, opts...)
 	res, err := c.doRequest("json")
 	if res != nil && res.StatusCode == http.StatusNotModified {
@@ -3346,7 +3537,7 @@ func (c *AnywhereCacheGetCall) Do(opts ...googleapi.CallOption) (*AnywhereCache,
 	//       "type": "string"
 	//     },
 	//     "bucket": {
-	//       "description": "Name of the partent bucket",
+	//       "description": "Name of the parent bucket.",
 	//       "location": "path",
 	//       "required": true,
 	//       "type": "string"
@@ -3369,7 +3560,7 @@ func (c *AnywhereCacheGetCall) Do(opts ...googleapi.CallOption) (*AnywhereCache,
 
 // method id "storage.anywhereCaches.insert":
 
-type AnywhereCacheInsertCall struct {
+type AnywhereCachesInsertCall struct {
 	s             *Service
 	bucket        string
 	anywherecache *AnywhereCache
@@ -3380,9 +3571,9 @@ type AnywhereCacheInsertCall struct {
 
 // Insert: Creates an Anywhere Cache instance.
 //
-// - bucket: Name of the partent bucket.
-func (r *AnywhereCacheService) Insert(bucket string, anywherecache *AnywhereCache) *AnywhereCacheInsertCall {
-	c := &AnywhereCacheInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - bucket: Name of the parent bucket.
+func (r *AnywhereCachesService) Insert(bucket string, anywherecache *AnywhereCache) *AnywhereCachesInsertCall {
+	c := &AnywhereCachesInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
 	c.bucket = bucket
 	c.anywherecache = anywherecache
 	return c
@@ -3391,7 +3582,7 @@ func (r *AnywhereCacheService) Insert(bucket string, anywherecache *AnywhereCach
 // Fields allows partial responses to be retrieved. See
 // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
 // for more information.
-func (c *AnywhereCacheInsertCall) Fields(s ...googleapi.Field) *AnywhereCacheInsertCall {
+func (c *AnywhereCachesInsertCall) Fields(s ...googleapi.Field) *AnywhereCachesInsertCall {
 	c.urlParams_.Set("fields", googleapi.CombineFields(s))
 	return c
 }
@@ -3399,21 +3590,21 @@ func (c *AnywhereCacheInsertCall) Fields(s ...googleapi.Field) *AnywhereCacheIns
 // Context sets the context to be used in this call's Do method. Any
 // pending HTTP request will be aborted if the provided context is
 // canceled.
-func (c *AnywhereCacheInsertCall) Context(ctx context.Context) *AnywhereCacheInsertCall {
+func (c *AnywhereCachesInsertCall) Context(ctx context.Context) *AnywhereCachesInsertCall {
 	c.ctx_ = ctx
 	return c
 }
 
 // Header returns an http.Header that can be modified by the caller to
 // add HTTP headers to the request.
-func (c *AnywhereCacheInsertCall) Header() http.Header {
+func (c *AnywhereCachesInsertCall) Header() http.Header {
 	if c.header_ == nil {
 		c.header_ = make(http.Header)
 	}
 	return c.header_
 }
 
-func (c *AnywhereCacheInsertCall) doRequest(alt string) (*http.Response, error) {
+func (c *AnywhereCachesInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
 	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
 	for k, v := range c.header_ {
@@ -3448,7 +3639,7 @@ func (c *AnywhereCacheInsertCall) doRequest(alt string) (*http.Response, error)
 // was returned at all) in error.(*googleapi.Error).Header. Use
 // googleapi.IsNotModified to check whether the returned error was
 // because http.StatusNotModified was returned.
-func (c *AnywhereCacheInsertCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) {
+func (c *AnywhereCachesInsertCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) {
 	gensupport.SetOptions(c.urlParams_, opts...)
 	res, err := c.doRequest("json")
 	if res != nil && res.StatusCode == http.StatusNotModified {
@@ -3487,7 +3678,7 @@ func (c *AnywhereCacheInsertCall) Do(opts ...googleapi.CallOption) (*GoogleLongr
 	//   ],
 	//   "parameters": {
 	//     "bucket": {
-	//       "description": "Name of the partent bucket",
+	//       "description": "Name of the parent bucket.",
 	//       "location": "path",
 	//       "required": true,
 	//       "type": "string"
@@ -3511,7 +3702,7 @@ func (c *AnywhereCacheInsertCall) Do(opts ...googleapi.CallOption) (*GoogleLongr
 
 // method id "storage.anywhereCaches.list":
 
-type AnywhereCacheListCall struct {
+type AnywhereCachesListCall struct {
 	s            *Service
 	bucket       string
 	urlParams_   gensupport.URLParams
@@ -3523,16 +3714,16 @@ type AnywhereCacheListCall struct {
 // List: Returns a list of Anywhere Cache instances of the bucket
 // matching the criteria.
 //
-// - bucket: Name of the partent bucket.
-func (r *AnywhereCacheService) List(bucket string) *AnywhereCacheListCall {
-	c := &AnywhereCacheListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - bucket: Name of the parent bucket.
+func (r *AnywhereCachesService) List(bucket string) *AnywhereCachesListCall {
+	c := &AnywhereCachesListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
 	c.bucket = bucket
 	return c
 }
 
 // PageSize sets the optional parameter "pageSize": Maximum number of
-// items return in a single page of responses. Maximum 1000.
-func (c *AnywhereCacheListCall) PageSize(pageSize int64) *AnywhereCacheListCall {
+// items to return in a single page of responses. Maximum 1000.
+func (c *AnywhereCachesListCall) PageSize(pageSize int64) *AnywhereCachesListCall {
 	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
 	return c
 }
@@ -3540,7 +3731,7 @@ func (c *AnywhereCacheListCall) PageSize(pageSize int64) *AnywhereCacheListCall
 // PageToken sets the optional parameter "pageToken": A
 // previously-returned page token representing part of the larger set of
 // results to view.
-func (c *AnywhereCacheListCall) PageToken(pageToken string) *AnywhereCacheListCall {
+func (c *AnywhereCachesListCall) PageToken(pageToken string) *AnywhereCachesListCall {
 	c.urlParams_.Set("pageToken", pageToken)
 	return c
 }
@@ -3548,7 +3739,7 @@ func (c *AnywhereCacheListCall) PageToken(pageToken string) *AnywhereCacheListCa
 // Fields allows partial responses to be retrieved. See
 // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
 // for more information.
-func (c *AnywhereCacheListCall) Fields(s ...googleapi.Field) *AnywhereCacheListCall {
+func (c *AnywhereCachesListCall) Fields(s ...googleapi.Field) *AnywhereCachesListCall {
 	c.urlParams_.Set("fields", googleapi.CombineFields(s))
 	return c
 }
@@ -3558,7 +3749,7 @@ func (c *AnywhereCacheListCall) Fields(s ...googleapi.Field) *AnywhereCacheListC
 // getting updates only after the object has changed since the last
 // request. Use googleapi.IsNotModified to check whether the response
 // error from Do is the result of In-None-Match.
-func (c *AnywhereCacheListCall) IfNoneMatch(entityTag string) *AnywhereCacheListCall {
+func (c *AnywhereCachesListCall) IfNoneMatch(entityTag string) *AnywhereCachesListCall {
 	c.ifNoneMatch_ = entityTag
 	return c
 }
@@ -3566,21 +3757,21 @@ func (c *AnywhereCacheListCall) IfNoneMatch(entityTag string) *AnywhereCacheList
 // Context sets the context to be used in this call's Do method. Any
 // pending HTTP request will be aborted if the provided context is
 // canceled.
-func (c *AnywhereCacheListCall) Context(ctx context.Context) *AnywhereCacheListCall {
+func (c *AnywhereCachesListCall) Context(ctx context.Context) *AnywhereCachesListCall {
 	c.ctx_ = ctx
 	return c
 }
 
 // Header returns an http.Header that can be modified by the caller to
 // add HTTP headers to the request.
-func (c *AnywhereCacheListCall) Header() http.Header {
+func (c *AnywhereCachesListCall) Header() http.Header {
 	if c.header_ == nil {
 		c.header_ = make(http.Header)
 	}
 	return c.header_
 }
 
-func (c *AnywhereCacheListCall) doRequest(alt string) (*http.Response, error) {
+func (c *AnywhereCachesListCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
 	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
 	for k, v := range c.header_ {
@@ -3613,7 +3804,7 @@ func (c *AnywhereCacheListCall) doRequest(alt string) (*http.Response, error) {
 // at all) in error.(*googleapi.Error).Header. Use
 // googleapi.IsNotModified to check whether the returned error was
 // because http.StatusNotModified was returned.
-func (c *AnywhereCacheListCall) Do(opts ...googleapi.CallOption) (*AnywhereCaches, error) {
+func (c *AnywhereCachesListCall) Do(opts ...googleapi.CallOption) (*AnywhereCaches, error) {
 	gensupport.SetOptions(c.urlParams_, opts...)
 	res, err := c.doRequest("json")
 	if res != nil && res.StatusCode == http.StatusNotModified {
@@ -3652,13 +3843,13 @@ func (c *AnywhereCacheListCall) Do(opts ...googleapi.CallOption) (*AnywhereCache
 	//   ],
 	//   "parameters": {
 	//     "bucket": {
-	//       "description": "Name of the partent bucket",
+	//       "description": "Name of the parent bucket.",
 	//       "location": "path",
 	//       "required": true,
 	//       "type": "string"
 	//     },
 	//     "pageSize": {
-	//       "description": "Maximum number of items return in a single page of responses. Maximum 1000.",
+	//       "description": "Maximum number of items to return in a single page of responses. Maximum 1000.",
 	//       "format": "int32",
 	//       "location": "query",
 	//       "minimum": "0",
@@ -3688,7 +3879,7 @@ func (c *AnywhereCacheListCall) Do(opts ...googleapi.CallOption) (*AnywhereCache
 // Pages invokes f for each page of results.
 // A non-nil error returned from f will halt the iteration.
 // The provided context supersedes any context provided to the Context method.
-func (c *AnywhereCacheListCall) Pages(ctx context.Context, f func(*AnywhereCaches) error) error {
+func (c *AnywhereCachesListCall) Pages(ctx context.Context, f func(*AnywhereCaches) error) error {
 	c.ctx_ = ctx
 	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
 	for {
@@ -3708,7 +3899,7 @@ func (c *AnywhereCacheListCall) Pages(ctx context.Context, f func(*AnywhereCache
 
 // method id "storage.anywhereCaches.pause":
 
-type AnywhereCachePauseCall struct {
+type AnywhereCachesPauseCall struct {
 	s               *Service
 	bucket          string
 	anywhereCacheId string
@@ -3720,9 +3911,9 @@ type AnywhereCachePauseCall struct {
 // Pause: Pauses an Anywhere Cache instance.
 //
 // - anywhereCacheId: The ID of requested Anywhere Cache instance.
-// - bucket: Name of the partent bucket.
-func (r *AnywhereCacheService) Pause(bucket string, anywhereCacheId string) *AnywhereCachePauseCall {
-	c := &AnywhereCachePauseCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - bucket: Name of the parent bucket.
+func (r *AnywhereCachesService) Pause(bucket string, anywhereCacheId string) *AnywhereCachesPauseCall {
+	c := &AnywhereCachesPauseCall{s: r.s, urlParams_: make(gensupport.URLParams)}
 	c.bucket = bucket
 	c.anywhereCacheId = anywhereCacheId
 	return c
@@ -3731,7 +3922,7 @@ func (r *AnywhereCacheService) Pause(bucket string, anywhereCacheId string) *Any
 // Fields allows partial responses to be retrieved. See
 // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
 // for more information.
-func (c *AnywhereCachePauseCall) Fields(s ...googleapi.Field) *AnywhereCachePauseCall {
+func (c *AnywhereCachesPauseCall) Fields(s ...googleapi.Field) *AnywhereCachesPauseCall {
 	c.urlParams_.Set("fields", googleapi.CombineFields(s))
 	return c
 }
@@ -3739,21 +3930,21 @@ func (c *AnywhereCachePauseCall) Fields(s ...googleapi.Field) *AnywhereCachePaus
 // Context sets the context to be used in this call's Do method. Any
 // pending HTTP request will be aborted if the provided context is
 // canceled.
-func (c *AnywhereCachePauseCall) Context(ctx context.Context) *AnywhereCachePauseCall {
+func (c *AnywhereCachesPauseCall) Context(ctx context.Context) *AnywhereCachesPauseCall {
 	c.ctx_ = ctx
 	return c
 }
 
 // Header returns an http.Header that can be modified by the caller to
 // add HTTP headers to the request.
-func (c *AnywhereCachePauseCall) Header() http.Header {
+func (c *AnywhereCachesPauseCall) Header() http.Header {
 	if c.header_ == nil {
 		c.header_ = make(http.Header)
 	}
 	return c.header_
 }
 
-func (c *AnywhereCachePauseCall) doRequest(alt string) (*http.Response, error) {
+func (c *AnywhereCachesPauseCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
 	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
 	for k, v := range c.header_ {
@@ -3784,7 +3975,7 @@ func (c *AnywhereCachePauseCall) doRequest(alt string) (*http.Response, error) {
 // at all) in error.(*googleapi.Error).Header. Use
 // googleapi.IsNotModified to check whether the returned error was
 // because http.StatusNotModified was returned.
-func (c *AnywhereCachePauseCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) {
+func (c *AnywhereCachesPauseCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) {
 	gensupport.SetOptions(c.urlParams_, opts...)
 	res, err := c.doRequest("json")
 	if res != nil && res.StatusCode == http.StatusNotModified {
@@ -3830,7 +4021,7 @@ func (c *AnywhereCachePauseCall) Do(opts ...googleapi.CallOption) (*AnywhereCach
 	//       "type": "string"
 	//     },
 	//     "bucket": {
-	//       "description": "Name of the partent bucket",
+	//       "description": "Name of the parent bucket.",
 	//       "location": "path",
 	//       "required": true,
 	//       "type": "string"
@@ -3851,7 +4042,7 @@ func (c *AnywhereCachePauseCall) Do(opts ...googleapi.CallOption) (*AnywhereCach
 
 // method id "storage.anywhereCaches.resume":
 
-type AnywhereCacheResumeCall struct {
+type AnywhereCachesResumeCall struct {
 	s               *Service
 	bucket          string
 	anywhereCacheId string
@@ -3863,9 +4054,9 @@ type AnywhereCacheResumeCall struct {
 // Resume: Resumes a paused or disabled Anywhere Cache instance.
 //
 // - anywhereCacheId: The ID of requested Anywhere Cache instance.
-// - bucket: Name of the partent bucket.
-func (r *AnywhereCacheService) Resume(bucket string, anywhereCacheId string) *AnywhereCacheResumeCall {
-	c := &AnywhereCacheResumeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - bucket: Name of the parent bucket.
+func (r *AnywhereCachesService) Resume(bucket string, anywhereCacheId string) *AnywhereCachesResumeCall {
+	c := &AnywhereCachesResumeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
 	c.bucket = bucket
 	c.anywhereCacheId = anywhereCacheId
 	return c
@@ -3874,7 +4065,7 @@ func (r *AnywhereCacheService) Resume(bucket string, anywhereCacheId string) *An
 // Fields allows partial responses to be retrieved. See
 // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
 // for more information.
-func (c *AnywhereCacheResumeCall) Fields(s ...googleapi.Field) *AnywhereCacheResumeCall {
+func (c *AnywhereCachesResumeCall) Fields(s ...googleapi.Field) *AnywhereCachesResumeCall {
 	c.urlParams_.Set("fields", googleapi.CombineFields(s))
 	return c
 }
@@ -3882,21 +4073,21 @@ func (c *AnywhereCacheResumeCall) Fields(s ...googleapi.Field) *AnywhereCacheRes
 // Context sets the context to be used in this call's Do method. Any
 // pending HTTP request will be aborted if the provided context is
 // canceled.
-func (c *AnywhereCacheResumeCall) Context(ctx context.Context) *AnywhereCacheResumeCall {
+func (c *AnywhereCachesResumeCall) Context(ctx context.Context) *AnywhereCachesResumeCall {
 	c.ctx_ = ctx
 	return c
 }
 
 // Header returns an http.Header that can be modified by the caller to
 // add HTTP headers to the request.
-func (c *AnywhereCacheResumeCall) Header() http.Header {
+func (c *AnywhereCachesResumeCall) Header() http.Header {
 	if c.header_ == nil {
 		c.header_ = make(http.Header)
 	}
 	return c.header_
 }
 
-func (c *AnywhereCacheResumeCall) doRequest(alt string) (*http.Response, error) {
+func (c *AnywhereCachesResumeCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
 	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
 	for k, v := range c.header_ {
@@ -3927,7 +4118,7 @@ func (c *AnywhereCacheResumeCall) doRequest(alt string) (*http.Response, error)
 // at all) in error.(*googleapi.Error).Header. Use
 // googleapi.IsNotModified to check whether the returned error was
 // because http.StatusNotModified was returned.
-func (c *AnywhereCacheResumeCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) {
+func (c *AnywhereCachesResumeCall) Do(opts ...googleapi.CallOption) (*AnywhereCache, error) {
 	gensupport.SetOptions(c.urlParams_, opts...)
 	res, err := c.doRequest("json")
 	if res != nil && res.StatusCode == http.StatusNotModified {
@@ -3973,7 +4164,7 @@ func (c *AnywhereCacheResumeCall) Do(opts ...googleapi.CallOption) (*AnywhereCac
 	//       "type": "string"
 	//     },
 	//     "bucket": {
-	//       "description": "Name of the partent bucket",
+	//       "description": "Name of the parent bucket.",
 	//       "location": "path",
 	//       "required": true,
 	//       "type": "string"
@@ -3994,7 +4185,7 @@ func (c *AnywhereCacheResumeCall) Do(opts ...googleapi.CallOption) (*AnywhereCac
 
 // method id "storage.anywhereCaches.update":
 
-type AnywhereCacheUpdateCall struct {
+type AnywhereCachesUpdateCall struct {
 	s               *Service
 	bucket          string
 	anywhereCacheId string
@@ -4008,9 +4199,9 @@ type AnywhereCacheUpdateCall struct {
 // Cache instance.
 //
 // - anywhereCacheId: The ID of requested Anywhere Cache instance.
-// - bucket: Name of the partent bucket.
-func (r *AnywhereCacheService) Update(bucket string, anywhereCacheId string, anywherecache *AnywhereCache) *AnywhereCacheUpdateCall {
-	c := &AnywhereCacheUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+// - bucket: Name of the parent bucket.
+func (r *AnywhereCachesService) Update(bucket string, anywhereCacheId string, anywherecache *AnywhereCache) *AnywhereCachesUpdateCall {
+	c := &AnywhereCachesUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
 	c.bucket = bucket
 	c.anywhereCacheId = anywhereCacheId
 	c.anywherecache = anywherecache
@@ -4020,7 +4211,7 @@ func (r *AnywhereCacheService) Update(bucket string, anywhereCacheId string, any
 // Fields allows partial responses to be retrieved. See
 // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
 // for more information.
-func (c *AnywhereCacheUpdateCall) Fields(s ...googleapi.Field) *AnywhereCacheUpdateCall {
+func (c *AnywhereCachesUpdateCall) Fields(s ...googleapi.Field) *AnywhereCachesUpdateCall {
 	c.urlParams_.Set("fields", googleapi.CombineFields(s))
 	return c
 }
@@ -4028,21 +4219,21 @@ func (c *AnywhereCacheUpdateCall) Fields(s ...googleapi.Field) *AnywhereCacheUpd
 // Context sets the context to be used in this call's Do method. Any
 // pending HTTP request will be aborted if the provided context is
 // canceled.
-func (c *AnywhereCacheUpdateCall) Context(ctx context.Context) *AnywhereCacheUpdateCall {
+func (c *AnywhereCachesUpdateCall) Context(ctx context.Context) *AnywhereCachesUpdateCall {
 	c.ctx_ = ctx
 	return c
 }
 
 // Header returns an http.Header that can be modified by the caller to
 // add HTTP headers to the request.
-func (c *AnywhereCacheUpdateCall) Header() http.Header {
+func (c *AnywhereCachesUpdateCall) Header() http.Header {
 	if c.header_ == nil {
 		c.header_ = make(http.Header)
 	}
 	return c.header_
 }
 
-func (c *AnywhereCacheUpdateCall) doRequest(alt string) (*http.Response, error) {
+func (c *AnywhereCachesUpdateCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
 	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
 	for k, v := range c.header_ {
@@ -4078,7 +4269,7 @@ func (c *AnywhereCacheUpdateCall) doRequest(alt string) (*http.Response, error)
 // was returned at all) in error.(*googleapi.Error).Header. Use
 // googleapi.IsNotModified to check whether the returned error was
 // because http.StatusNotModified was returned.
-func (c *AnywhereCacheUpdateCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) {
+func (c *AnywhereCachesUpdateCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) {
 	gensupport.SetOptions(c.urlParams_, opts...)
 	res, err := c.doRequest("json")
 	if res != nil && res.StatusCode == http.StatusNotModified {
@@ -4124,7 +4315,7 @@ func (c *AnywhereCacheUpdateCall) Do(opts ...googleapi.CallOption) (*GoogleLongr
 	//       "type": "string"
 	//     },
 	//     "bucket": {
-	//       "description": "Name of the partent bucket",
+	//       "description": "Name of the parent bucket.",
 	//       "location": "path",
 	//       "required": true,
 	//       "type": "string"
@@ -8316,6 +8507,932 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption)
 
 }
 
+// method id "storage.folders.delete":
+
+type FoldersDeleteCall struct {
+	s          *Service
+	bucket     string
+	folder     string
+	urlParams_ gensupport.URLParams
+	ctx_       context.Context
+	header_    http.Header
+}
+
+// Delete: Permanently deletes a folder. Only applicable to buckets with
+// hierarchical namespace enabled.
+//
+// - bucket: Name of the bucket in which the folder resides.
+// - folder: Name of a folder.
+func (r *FoldersService) Delete(bucket string, folder string) *FoldersDeleteCall {
+	c := &FoldersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.bucket = bucket
+	c.folder = folder
+	return c
+}
+
+// IfMetagenerationMatch sets the optional parameter
+// "ifMetagenerationMatch": If set, only deletes the folder if its
+// metageneration matches this value.
+func (c *FoldersDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *FoldersDeleteCall {
+	c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
+	return c
+}
+
+// IfMetagenerationNotMatch sets the optional parameter
+// "ifMetagenerationNotMatch": If set, only deletes the folder if its
+// metageneration does not match this value.
+func (c *FoldersDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *FoldersDeleteCall {
+	c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *FoldersDeleteCall) Fields(s ...googleapi.Field) *FoldersDeleteCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *FoldersDeleteCall) Context(ctx context.Context) *FoldersDeleteCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *FoldersDeleteCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *FoldersDeleteCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders/{folder}")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("DELETE", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+		"folder": c.folder,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.folders.delete" call.
+func (c *FoldersDeleteCall) Do(opts ...googleapi.CallOption) error {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if err != nil {
+		return err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return gensupport.WrapError(err)
+	}
+	return nil
+	// {
+	//   "description": "Permanently deletes a folder. Only applicable to buckets with hierarchical namespace enabled.",
+	//   "httpMethod": "DELETE",
+	//   "id": "storage.folders.delete",
+	//   "parameterOrder": [
+	//     "bucket",
+	//     "folder"
+	//   ],
+	//   "parameters": {
+	//     "bucket": {
+	//       "description": "Name of the bucket in which the folder resides.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "folder": {
+	//       "description": "Name of a folder.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "ifMetagenerationMatch": {
+	//       "description": "If set, only deletes the folder if its metageneration matches this value.",
+	//       "format": "int64",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "ifMetagenerationNotMatch": {
+	//       "description": "If set, only deletes the folder if its metageneration does not match this value.",
+	//       "format": "int64",
+	//       "location": "query",
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "b/{bucket}/folders/{folder}",
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/devstorage.full_control",
+	//     "https://www.googleapis.com/auth/devstorage.read_write"
+	//   ]
+	// }
+
+}
+
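The delete call carries the metageneration preconditions described in the discovery schema, so removal can be made conditional on the metadata the caller last observed. A sketch with placeholder values, reusing svc and ctx from the earlier sketches; folder names are written with a trailing slash:

	// Delete a folder only if its metadata has not changed since it was read;
	// a concurrent metadata update makes the call fail instead.
	const lastSeenMetageneration = 5 // placeholder value from a prior read
	err := svc.Folders.Delete("example-hns-bucket", "reports/2023/").
		IfMetagenerationMatch(lastSeenMetageneration).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}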
+// method id "storage.folders.get":
+
+type FoldersGetCall struct {
+	s            *Service
+	bucket       string
+	folder       string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// Get: Returns metadata for the specified folder. Only applicable to
+// buckets with hierarchical namespace enabled.
+//
+// - bucket: Name of the bucket in which the folder resides.
+// - folder: Name of a folder.
+func (r *FoldersService) Get(bucket string, folder string) *FoldersGetCall {
+	c := &FoldersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.bucket = bucket
+	c.folder = folder
+	return c
+}
+
+// IfMetagenerationMatch sets the optional parameter
+// "ifMetagenerationMatch": Makes the return of the folder metadata
+// conditional on whether the folder's current metageneration matches
+// the given value.
+func (c *FoldersGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *FoldersGetCall {
+	c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
+	return c
+}
+
+// IfMetagenerationNotMatch sets the optional parameter
+// "ifMetagenerationNotMatch": Makes the return of the folder metadata
+// conditional on whether the folder's current metageneration does not
+// match the given value.
+func (c *FoldersGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *FoldersGetCall {
+	c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *FoldersGetCall) Fields(s ...googleapi.Field) *FoldersGetCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *FoldersGetCall) IfNoneMatch(entityTag string) *FoldersGetCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *FoldersGetCall) Context(ctx context.Context) *FoldersGetCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *FoldersGetCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *FoldersGetCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders/{folder}")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+		"folder": c.folder,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.folders.get" call.
+// Exactly one of *Folder or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Folder.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *FoldersGetCall) Do(opts ...googleapi.CallOption) (*Folder, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, gensupport.WrapError(&googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, gensupport.WrapError(err)
+	}
+	ret := &Folder{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Returns metadata for the specified folder. Only applicable to buckets with hierarchical namespace enabled.",
+	//   "httpMethod": "GET",
+	//   "id": "storage.folders.get",
+	//   "parameterOrder": [
+	//     "bucket",
+	//     "folder"
+	//   ],
+	//   "parameters": {
+	//     "bucket": {
+	//       "description": "Name of the bucket in which the folder resides.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "folder": {
+	//       "description": "Name of a folder.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "ifMetagenerationMatch": {
+	//       "description": "Makes the return of the folder metadata conditional on whether the folder's current metageneration matches the given value.",
+	//       "format": "int64",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "ifMetagenerationNotMatch": {
+	//       "description": "Makes the return of the folder metadata conditional on whether the folder's current metageneration does not match the given value.",
+	//       "format": "int64",
+	//       "location": "query",
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "b/{bucket}/folders/{folder}",
+	//   "response": {
+	//     "$ref": "Folder"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/cloud-platform.read-only",
+	//     "https://www.googleapis.com/auth/devstorage.full_control",
+	//     "https://www.googleapis.com/auth/devstorage.read_only",
+	//     "https://www.googleapis.com/auth/devstorage.read_write"
+	//   ]
+	// }
+
+}
+
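The get call follows the usual generated read pattern, including the If-None-Match plumbing, so cached folder metadata can be revalidated cheaply. A sketch with placeholder names, reusing svc and ctx from the earlier sketches; googleapi is google.golang.org/api/googleapi:

	// Fetch folder metadata, revalidating against a previously seen ETag.
	folder, err := svc.Folders.Get("example-hns-bucket", "reports/2024/").
		IfNoneMatch(`"cached-etag"`). // placeholder ETag from an earlier response
		Context(ctx).
		Do()
	switch {
	case googleapi.IsNotModified(err):
		fmt.Println("folder metadata unchanged")
	case err != nil:
		log.Fatal(err)
	default:
		fmt.Println(folder.Name, folder.Metageneration, folder.TimeCreated)
	}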
+// method id "storage.folders.insert":
+
+type FoldersInsertCall struct {
+	s          *Service
+	bucket     string
+	folder     *Folder
+	urlParams_ gensupport.URLParams
+	ctx_       context.Context
+	header_    http.Header
+}
+
+// Insert: Creates a new folder. Only applicable to buckets with
+// hierarchical namespace enabled.
+//
+// - bucket: Name of the bucket in which the folder resides.
+func (r *FoldersService) Insert(bucket string, folder *Folder) *FoldersInsertCall {
+	c := &FoldersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.bucket = bucket
+	c.folder = folder
+	return c
+}
+
+// Recursive sets the optional parameter "recursive": If true, any
+// parent folder which doesn’t exist will be created automatically.
+func (c *FoldersInsertCall) Recursive(recursive bool) *FoldersInsertCall {
+	c.urlParams_.Set("recursive", fmt.Sprint(recursive))
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *FoldersInsertCall) Fields(s ...googleapi.Field) *FoldersInsertCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *FoldersInsertCall) Context(ctx context.Context) *FoldersInsertCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *FoldersInsertCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *FoldersInsertCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	var body io.Reader = nil
+	body, err := googleapi.WithoutDataWrapper.JSONReader(c.folder)
+	if err != nil {
+		return nil, err
+	}
+	reqHeaders.Set("Content-Type", "application/json")
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("POST", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.folders.insert" call.
+// Exactly one of *Folder or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Folder.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *FoldersInsertCall) Do(opts ...googleapi.CallOption) (*Folder, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, gensupport.WrapError(&googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, gensupport.WrapError(err)
+	}
+	ret := &Folder{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Creates a new folder. Only applicable to buckets with hierarchical namespace enabled.",
+	//   "httpMethod": "POST",
+	//   "id": "storage.folders.insert",
+	//   "parameterOrder": [
+	//     "bucket"
+	//   ],
+	//   "parameters": {
+	//     "bucket": {
+	//       "description": "Name of the bucket in which the folder resides.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "recursive": {
+	//       "description": "If true, any parent folder which doesn’t exist will be created automatically.",
+	//       "location": "query",
+	//       "type": "boolean"
+	//     }
+	//   },
+	//   "path": "b/{bucket}/folders",
+	//   "request": {
+	//     "$ref": "Folder"
+	//   },
+	//   "response": {
+	//     "$ref": "Folder"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/devstorage.full_control",
+	//     "https://www.googleapis.com/auth/devstorage.read_write"
+	//   ]
+	// }
+
+}
+
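Folder creation takes the new resource in the request body, and the recursive parameter creates any missing parents in the same call. A sketch with placeholder names, reusing svc and ctx from the earlier sketches:

	// Create reports/2024/q1/ and let the service create reports/ and
	// reports/2024/ along the way if they do not exist yet.
	created, err := svc.Folders.Insert("example-hns-bucket", &storage.Folder{
		Name: "reports/2024/q1/",
	}).Recursive(true).Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created:", created.Name, "metageneration:", created.Metageneration)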
+// method id "storage.folders.list":
+
+type FoldersListCall struct {
+	s            *Service
+	bucket       string
+	urlParams_   gensupport.URLParams
+	ifNoneMatch_ string
+	ctx_         context.Context
+	header_      http.Header
+}
+
+// List: Retrieves a list of folders matching the criteria. Only
+// applicable to buckets with hierarchical namespace enabled.
+//
+// - bucket: Name of the bucket in which to look for folders.
+func (r *FoldersService) List(bucket string) *FoldersListCall {
+	c := &FoldersListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.bucket = bucket
+	return c
+}
+
+// Delimiter sets the optional parameter "delimiter": Returns results in
+// a directory-like mode. The only supported value is '/'. If set, items
+// will only contain folders that either exactly match the prefix, or
+// are one level below the prefix.
+func (c *FoldersListCall) Delimiter(delimiter string) *FoldersListCall {
+	c.urlParams_.Set("delimiter", delimiter)
+	return c
+}
+
+// EndOffset sets the optional parameter "endOffset": Filter results to
+// folders whose names are lexicographically before endOffset. If
+// startOffset is also set, the folders listed will have names between
+// startOffset (inclusive) and endOffset (exclusive).
+func (c *FoldersListCall) EndOffset(endOffset string) *FoldersListCall {
+	c.urlParams_.Set("endOffset", endOffset)
+	return c
+}
+
+// PageSize sets the optional parameter "pageSize": Maximum number of
+// items to return in a single page of responses.
+func (c *FoldersListCall) PageSize(pageSize int64) *FoldersListCall {
+	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": A
+// previously-returned page token representing part of the larger set of
+// results to view.
+func (c *FoldersListCall) PageToken(pageToken string) *FoldersListCall {
+	c.urlParams_.Set("pageToken", pageToken)
+	return c
+}
+
+// Prefix sets the optional parameter "prefix": Filter results to
+// folders whose paths begin with this prefix. If set, the value must
+// either be an empty string or end with a '/'.
+func (c *FoldersListCall) Prefix(prefix string) *FoldersListCall {
+	c.urlParams_.Set("prefix", prefix)
+	return c
+}
+
+// StartOffset sets the optional parameter "startOffset": Filter results
+// to folders whose names are lexicographically equal to or after
+// startOffset. If endOffset is also set, the folders listed will have
+// names between startOffset (inclusive) and endOffset (exclusive).
+func (c *FoldersListCall) StartOffset(startOffset string) *FoldersListCall {
+	c.urlParams_.Set("startOffset", startOffset)
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *FoldersListCall) Fields(s ...googleapi.Field) *FoldersListCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *FoldersListCall) IfNoneMatch(entityTag string) *FoldersListCall {
+	c.ifNoneMatch_ = entityTag
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *FoldersListCall) Context(ctx context.Context) *FoldersListCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *FoldersListCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *FoldersListCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	if c.ifNoneMatch_ != "" {
+		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+	}
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket": c.bucket,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.folders.list" call.
+// Exactly one of *Folders or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Folders.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *FoldersListCall) Do(opts ...googleapi.CallOption) (*Folders, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, gensupport.WrapError(&googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, gensupport.WrapError(err)
+	}
+	ret := &Folders{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Retrieves a list of folders matching the criteria. Only applicable to buckets with hierarchical namespace enabled.",
+	//   "httpMethod": "GET",
+	//   "id": "storage.folders.list",
+	//   "parameterOrder": [
+	//     "bucket"
+	//   ],
+	//   "parameters": {
+	//     "bucket": {
+	//       "description": "Name of the bucket in which to look for folders.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "delimiter": {
+	//       "description": "Returns results in a directory-like mode. The only supported value is '/'. If set, items will only contain folders that either exactly match the prefix, or are one level below the prefix.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "endOffset": {
+	//       "description": "Filter results to folders whose names are lexicographically before endOffset. If startOffset is also set, the folders listed will have names between startOffset (inclusive) and endOffset (exclusive).",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "pageSize": {
+	//       "description": "Maximum number of items to return in a single page of responses.",
+	//       "format": "int32",
+	//       "location": "query",
+	//       "minimum": "0",
+	//       "type": "integer"
+	//     },
+	//     "pageToken": {
+	//       "description": "A previously-returned page token representing part of the larger set of results to view.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "prefix": {
+	//       "description": "Filter results to folders whose paths begin with this prefix. If set, the value must either be an empty string or end with a '/'.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "startOffset": {
+	//       "description": "Filter results to folders whose names are lexicographically equal to or after startOffset. If endOffset is also set, the folders listed will have names between startOffset (inclusive) and endOffset (exclusive).",
+	//       "location": "query",
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "b/{bucket}/folders",
+	//   "response": {
+	//     "$ref": "Folders"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/cloud-platform.read-only",
+	//     "https://www.googleapis.com/auth/devstorage.full_control",
+	//     "https://www.googleapis.com/auth/devstorage.read_only",
+	//     "https://www.googleapis.com/auth/devstorage.read_write"
+	//   ]
+	// }
+
+}
+
+// Pages invokes f for each page of results.
+// A non-nil error returned from f will halt the iteration.
+// The provided context supersedes any context provided to the Context method.
+func (c *FoldersListCall) Pages(ctx context.Context, f func(*Folders) error) error {
+	c.ctx_ = ctx
+	defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
+	for {
+		x, err := c.Do()
+		if err != nil {
+			return err
+		}
+		if err := f(x); err != nil {
+			return err
+		}
+		if x.NextPageToken == "" {
+			return nil
+		}
+		c.PageToken(x.NextPageToken)
+	}
+}
+
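// Illustrative sketch (not part of the generated client): paging through
// folders with FoldersListCall.Pages. The svc.Folders accessor, the Items
// field on the Folders list response, and the Name field on Folder are
// assumptions inferred from the discovery schema above, not shown in this hunk.
func exampleListFolders(ctx context.Context, svc *Service, bucket string) error {
	call := svc.Folders.List(bucket).Prefix("logs/").PageSize(100)
	// Pages handles the pageToken bookkeeping; returning a non-nil error from
	// the callback stops the iteration early.
	return call.Pages(ctx, func(p *Folders) error {
		for _, f := range p.Items { // Items: assumed []*Folder field on the list response
			fmt.Println(f.Name) // Name: assumed from the Folder resource schema
		}
		return nil
	})
}
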
+// method id "storage.folders.rename":
+
+type FoldersRenameCall struct {
+	s                 *Service
+	bucket            string
+	sourceFolder      string
+	destinationFolder string
+	urlParams_        gensupport.URLParams
+	ctx_              context.Context
+	header_           http.Header
+}
+
+// Rename: Renames a source folder to a destination folder. Only
+// applicable to buckets with hierarchical namespace enabled.
+//
+// - bucket: Name of the bucket in which the folders reside.
+// - destinationFolder: Name of the destination folder.
+// - sourceFolder: Name of the source folder.
+func (r *FoldersService) Rename(bucket string, sourceFolder string, destinationFolder string) *FoldersRenameCall {
+	c := &FoldersRenameCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+	c.bucket = bucket
+	c.sourceFolder = sourceFolder
+	c.destinationFolder = destinationFolder
+	return c
+}
+
+// IfSourceMetagenerationMatch sets the optional parameter
+// "ifSourceMetagenerationMatch": Makes the operation conditional on
+// whether the source object's current metageneration matches the given
+// value.
+func (c *FoldersRenameCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *FoldersRenameCall {
+	c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch))
+	return c
+}
+
+// IfSourceMetagenerationNotMatch sets the optional parameter
+// "ifSourceMetagenerationNotMatch": Makes the operation conditional on
+// whether the source object's current metageneration does not match the
+// given value.
+func (c *FoldersRenameCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *FoldersRenameCall {
+	c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch))
+	return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *FoldersRenameCall) Fields(s ...googleapi.Field) *FoldersRenameCall {
+	c.urlParams_.Set("fields", googleapi.CombineFields(s))
+	return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *FoldersRenameCall) Context(ctx context.Context) *FoldersRenameCall {
+	c.ctx_ = ctx
+	return c
+}
+
+// Header returns an http.Header that can be modified by the caller to
+// add HTTP headers to the request.
+func (c *FoldersRenameCall) Header() http.Header {
+	if c.header_ == nil {
+		c.header_ = make(http.Header)
+	}
+	return c.header_
+}
+
+func (c *FoldersRenameCall) doRequest(alt string) (*http.Response, error) {
+	reqHeaders := make(http.Header)
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version)
+	for k, v := range c.header_ {
+		reqHeaders[k] = v
+	}
+	reqHeaders.Set("User-Agent", c.s.userAgent())
+	var body io.Reader = nil
+	c.urlParams_.Set("alt", alt)
+	c.urlParams_.Set("prettyPrint", "false")
+	urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/folders/{sourceFolder}/renameTo/folders/{destinationFolder}")
+	urls += "?" + c.urlParams_.Encode()
+	req, err := http.NewRequest("POST", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = reqHeaders
+	googleapi.Expand(req.URL, map[string]string{
+		"bucket":            c.bucket,
+		"sourceFolder":      c.sourceFolder,
+		"destinationFolder": c.destinationFolder,
+	})
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
+}
+
+// Do executes the "storage.folders.rename" call.
+// Exactly one of *GoogleLongrunningOperation or error will be non-nil.
+// Any non-2xx status code is an error. Response headers are in either
+// *GoogleLongrunningOperation.ServerResponse.Header or (if a response
+// was returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *FoldersRenameCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) {
+	gensupport.SetOptions(c.urlParams_, opts...)
+	res, err := c.doRequest("json")
+	if res != nil && res.StatusCode == http.StatusNotModified {
+		if res.Body != nil {
+			res.Body.Close()
+		}
+		return nil, gensupport.WrapError(&googleapi.Error{
+			Code:   res.StatusCode,
+			Header: res.Header,
+		})
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, gensupport.WrapError(err)
+	}
+	ret := &GoogleLongrunningOperation{
+		ServerResponse: googleapi.ServerResponse{
+			Header:         res.Header,
+			HTTPStatusCode: res.StatusCode,
+		},
+	}
+	target := &ret
+	if err := gensupport.DecodeResponse(target, res); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Renames a source folder to a destination folder. Only applicable to buckets with hierarchical namespace enabled.",
+	//   "httpMethod": "POST",
+	//   "id": "storage.folders.rename",
+	//   "parameterOrder": [
+	//     "bucket",
+	//     "sourceFolder",
+	//     "destinationFolder"
+	//   ],
+	//   "parameters": {
+	//     "bucket": {
+	//       "description": "Name of the bucket in which the folders are in.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "destinationFolder": {
+	//       "description": "Name of the destination folder.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "ifSourceMetagenerationMatch": {
+	//       "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
+	//       "format": "int64",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "ifSourceMetagenerationNotMatch": {
+	//       "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
+	//       "format": "int64",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "sourceFolder": {
+	//       "description": "Name of the source folder.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "b/{bucket}/folders/{sourceFolder}/renameTo/folders/{destinationFolder}",
+	//   "response": {
+	//     "$ref": "GoogleLongrunningOperation"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform",
+	//     "https://www.googleapis.com/auth/devstorage.full_control",
+	//     "https://www.googleapis.com/auth/devstorage.read_write"
+	//   ]
+	// }
+
+}
+
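// Illustrative sketch (not part of the generated client): renaming a folder.
// Do returns a *GoogleLongrunningOperation; the Done and Name fields used
// below follow the standard google.longrunning.Operation shape and are an
// assumption, not shown in this hunk.
func exampleRenameFolder(ctx context.Context, svc *Service, bucket string) error {
	op, err := svc.Folders.Rename(bucket, "old-dir/", "new-dir/").Context(ctx).Do()
	if err != nil {
		return err
	}
	if !op.Done { // Done/Name: assumed from the long-running Operation schema
		fmt.Printf("folder rename still in progress: %s\n", op.Name)
	}
	return nil
}
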
 // method id "storage.managedFolders.delete":
 
 type ManagedFoldersDeleteCall struct {
@@ -8999,7 +10116,7 @@ func (r *ManagedFoldersService) List(bucket string) *ManagedFoldersListCall {
 }
 
 // PageSize sets the optional parameter "pageSize": Maximum number of
-// items return in a single page of responses.
+// items to return in a single page of responses.
 func (c *ManagedFoldersListCall) PageSize(pageSize int64) *ManagedFoldersListCall {
 	c.urlParams_.Set("pageSize", fmt.Sprint(pageSize))
 	return c
@@ -9133,7 +10250,7 @@ func (c *ManagedFoldersListCall) Do(opts ...googleapi.CallOption) (*ManagedFolde
 	//       "type": "string"
 	//     },
 	//     "pageSize": {
-	//       "description": "Maximum number of items return in a single page of responses.",
+	//       "description": "Maximum number of items to return in a single page of responses.",
 	//       "format": "int32",
 	//       "location": "query",
 	//       "minimum": "0",
diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go
index 34f9ba8bdb..10830f0164 100644
--- a/vendor/google.golang.org/api/transport/grpc/dial.go
+++ b/vendor/google.golang.org/api/transport/grpc/dial.go
@@ -14,6 +14,7 @@ import (
 	"net"
 	"os"
 	"strings"
+	"sync"
 	"time"
 
 	"cloud.google.com/go/compute/metadata"
@@ -27,6 +28,7 @@ import (
 	grpcgoogle "google.golang.org/grpc/credentials/google"
 	grpcinsecure "google.golang.org/grpc/credentials/insecure"
 	"google.golang.org/grpc/credentials/oauth"
+	"google.golang.org/grpc/stats"
 
 	// Install grpclb, which is required for direct path.
 	_ "google.golang.org/grpc/balancer/grpclb"
@@ -47,6 +49,26 @@ var logRateLimiter = rate.Sometimes{Interval: 1 * time.Second}
 // Assign to var for unit test replacement
 var dialContext = grpc.DialContext
 
+// otelStatsHandler is a singleton otelgrpc.clientHandler to be used across
+// all dial connections to avoid the memory leak documented in
+// https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4226
+//
+// TODO: If 4226 has been fixed in opentelemetry-go-contrib, replace this
+// singleton with inline usage for simplicity.
+var (
+	initOtelStatsHandlerOnce sync.Once
+	otelStatsHandler         stats.Handler
+)
+
+// otelGRPCStatsHandler returns the singleton otelStatsHandler for reuse across all
+// dial connections.
+func otelGRPCStatsHandler() stats.Handler {
+	initOtelStatsHandlerOnce.Do(func() {
+		otelStatsHandler = otelgrpc.NewClientHandler()
+	})
+	return otelStatsHandler
+}
+
 // Dial returns a GRPC connection for use communicating with a Google cloud
 // service, configured with the given ClientOptions.
 func Dial(ctx context.Context, opts ...option.ClientOption) (*grpc.ClientConn, error) {
@@ -146,52 +168,56 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C
 	// when dialing an insecure connection?
 	if !o.NoAuth && !insecure {
 		if o.APIKey != "" {
-			log.Print("API keys are not supported for gRPC APIs. Remove the WithAPIKey option from your client-creating call.")
-		}
-		creds, err := internal.Creds(ctx, o)
-		if err != nil {
-			return nil, err
-		}
-
-		grpcOpts = append(grpcOpts,
-			grpc.WithPerRPCCredentials(grpcTokenSource{
-				TokenSource:   oauth.TokenSource{creds.TokenSource},
+			grpcOpts = append(grpcOpts, grpc.WithPerRPCCredentials(grpcAPIKey{
+				apiKey:        o.APIKey,
+				requestReason: o.RequestReason,
+			}))
+		} else {
+			creds, err := internal.Creds(ctx, o)
+			if err != nil {
+				return nil, err
+			}
+			grpcOpts = append(grpcOpts, grpc.WithPerRPCCredentials(grpcTokenSource{
+				TokenSource:   oauth.TokenSource{TokenSource: creds.TokenSource},
 				quotaProject:  internal.GetQuotaProject(creds, o.QuotaProject),
 				requestReason: o.RequestReason,
-			}),
-		)
-
-		// Attempt Direct Path:
-		logRateLimiter.Do(func() {
-			logDirectPathMisconfig(endpoint, creds.TokenSource, o)
-		})
-		if isDirectPathEnabled(endpoint, o) && isTokenSourceDirectPathCompatible(creds.TokenSource, o) && metadata.OnGCE() {
-			// Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates.
-			grpcOpts = []grpc.DialOption{
-				grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{oauth.TokenSource{creds.TokenSource}}))}
-			if timeoutDialerOption != nil {
-				grpcOpts = append(grpcOpts, timeoutDialerOption)
-			}
-			// Check if google-c2p resolver is enabled for DirectPath
-			if isDirectPathXdsUsed(o) {
-				// google-c2p resolver target must not have a port number
-				if addr, _, err := net.SplitHostPort(endpoint); err == nil {
-					endpoint = "google-c2p:///" + addr
+			}))
+			// Attempt Direct Path:
+			logRateLimiter.Do(func() {
+				logDirectPathMisconfig(endpoint, creds.TokenSource, o)
+			})
+			if isDirectPathEnabled(endpoint, o) && isTokenSourceDirectPathCompatible(creds.TokenSource, o) && metadata.OnGCE() {
+				// Overwrite all of the previously specified DialOptions. DirectPath uses its own set of credentials and certificates.
+				grpcOpts = []grpc.DialOption{
+					grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(
+						grpcgoogle.DefaultCredentialsOptions{
+							PerRPCCreds: oauth.TokenSource{TokenSource: creds.TokenSource},
+						})),
+				}
+				if timeoutDialerOption != nil {
+					grpcOpts = append(grpcOpts, timeoutDialerOption)
+				}
+				// Check if google-c2p resolver is enabled for DirectPath
+				if isDirectPathXdsUsed(o) {
+					// google-c2p resolver target must not have a port number
+					if addr, _, err := net.SplitHostPort(endpoint); err == nil {
+						endpoint = "google-c2p:///" + addr
+					} else {
+						endpoint = "google-c2p:///" + endpoint
+					}
 				} else {
-					endpoint = "google-c2p:///" + endpoint
+					if !strings.HasPrefix(endpoint, "dns:///") {
+						endpoint = "dns:///" + endpoint
+					}
+					grpcOpts = append(grpcOpts,
+						// For now all DirectPath Go clients will be using the following lb config, but in the
+						// future, when different services need different configs, we should change this to a
+						// per-service config.
+						grpc.WithDisableServiceConfig(),
+						grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`))
 				}
-			} else {
-				if !strings.HasPrefix(endpoint, "dns:///") {
-					endpoint = "dns:///" + endpoint
-				}
-				grpcOpts = append(grpcOpts,
-					// For now all DirectPath go clients will be using the following lb config, but in future
-					// when different services need different configs, then we should change this to a
-					// per-service config.
-					grpc.WithDisableServiceConfig(),
-					grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`))
+				// TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor.
 			}
-			// TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor.
 		}
 	}
 
@@ -219,7 +245,7 @@ func addOpenTelemetryStatsHandler(opts []grpc.DialOption, settings *internal.Dia
 	if settings.TelemetryDisabled {
 		return opts
 	}
-	return append(opts, grpc.WithStatsHandler(otelgrpc.NewClientHandler()))
+	return append(opts, grpc.WithStatsHandler(otelGRPCStatsHandler()))
 }
 
 // grpcTokenSource supplies PerRPCCredentials from an oauth.TokenSource.
@@ -249,6 +275,31 @@ func (ts grpcTokenSource) GetRequestMetadata(ctx context.Context, uri ...string)
 	return metadata, nil
 }
 
+// grpcAPIKey supplies PerRPCCredentials from an API Key.
+type grpcAPIKey struct {
+	apiKey string
+
+	// Additional metadata attached as headers.
+	requestReason string
+}
+
+// GetRequestMetadata gets the request metadata as a map from a grpcAPIKey.
+func (ts grpcAPIKey) GetRequestMetadata(ctx context.Context, uri ...string) (
+	map[string]string, error) {
+	metadata := map[string]string{
+		"X-goog-api-key": ts.apiKey,
+	}
+	if ts.requestReason != "" {
+		metadata["X-goog-request-reason"] = ts.requestReason
+	}
+	return metadata, nil
+}
+
+// RequireTransportSecurity indicates whether the credentials require transport security.
+func (ts grpcAPIKey) RequireTransportSecurity() bool {
+	return true
+}
+
 func isDirectPathEnabled(endpoint string, o *internal.DialSettings) bool {
 	if !o.EnableDirectPath {
 		return false
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index 2fa694d555..682fa1831e 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -144,7 +144,8 @@ type Server struct {
 	channelzID *channelz.Identifier
 	czData     *channelzData
 
-	serverWorkerChannel chan func()
+	serverWorkerChannel      chan func()
+	serverWorkerChannelClose func()
 }
 
 type serverOptions struct {
@@ -623,15 +624,14 @@ func (s *Server) serverWorker() {
 // connections to reduce the time spent overall on runtime.morestack.
 func (s *Server) initServerWorkers() {
 	s.serverWorkerChannel = make(chan func())
+	s.serverWorkerChannelClose = grpcsync.OnceFunc(func() {
+		close(s.serverWorkerChannel)
+	})
 	for i := uint32(0); i < s.opts.numServerWorkers; i++ {
 		go s.serverWorker()
 	}
 }
 
-func (s *Server) stopServerWorkers() {
-	close(s.serverWorkerChannel)
-}
-
 // NewServer creates a gRPC server which has no service registered and has not
 // started to accept requests yet.
 func NewServer(opt ...ServerOption) *Server {
@@ -1898,15 +1898,19 @@ func (s *Server) stop(graceful bool) {
 		s.closeServerTransportsLocked()
 	}
 
-	if s.opts.numServerWorkers > 0 {
-		s.stopServerWorkers()
-	}
-
 	for len(s.conns) != 0 {
 		s.cv.Wait()
 	}
 	s.conns = nil
 
+	if s.opts.numServerWorkers > 0 {
+		// Closing the channel (only once, via grpcsync.OnceFunc) after all the
+		// connections have been closed above ensures that there are no
+		// goroutines executing the callback passed to st.HandleStreams (where
+		// the channel is written to).
+		s.serverWorkerChannelClose()
+	}
+
 	if s.events != nil {
 		s.events.Finish()
 		s.events = nil
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index a04793aeb5..dc2cea59c9 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
 package grpc
 
 // Version is the current grpc version.
-const Version = "1.60.0"
+const Version = "1.60.1"
diff --git a/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go
new file mode 100644
index 0000000000..2ef36bbcf9
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go
@@ -0,0 +1,160 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protodelim marshals and unmarshals varint size-delimited messages.
+package protodelim
+
+import (
+	"bufio"
+	"encoding/binary"
+	"fmt"
+	"io"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/internal/errors"
+	"google.golang.org/protobuf/proto"
+)
+
+// MarshalOptions is a configurable varint size-delimited marshaler.
+type MarshalOptions struct{ proto.MarshalOptions }
+
+// MarshalTo writes a varint size-delimited wire-format message to w.
+// If w returns an error, MarshalTo returns it unchanged.
+func (o MarshalOptions) MarshalTo(w io.Writer, m proto.Message) (int, error) {
+	msgBytes, err := o.MarshalOptions.Marshal(m)
+	if err != nil {
+		return 0, err
+	}
+
+	sizeBytes := protowire.AppendVarint(nil, uint64(len(msgBytes)))
+	sizeWritten, err := w.Write(sizeBytes)
+	if err != nil {
+		return sizeWritten, err
+	}
+	msgWritten, err := w.Write(msgBytes)
+	if err != nil {
+		return sizeWritten + msgWritten, err
+	}
+	return sizeWritten + msgWritten, nil
+}
+
+// MarshalTo writes a varint size-delimited wire-format message to w
+// with the default options.
+//
+// See the documentation for [MarshalOptions.MarshalTo].
+func MarshalTo(w io.Writer, m proto.Message) (int, error) {
+	return MarshalOptions{}.MarshalTo(w, m)
+}
+
+// UnmarshalOptions is a configurable varint size-delimited unmarshaler.
+type UnmarshalOptions struct {
+	proto.UnmarshalOptions
+
+	// MaxSize is the maximum size in wire-format bytes of a single message.
+	// Unmarshaling a message larger than MaxSize will return an error.
+	// A zero MaxSize will default to 4 MiB.
+	// Setting MaxSize to -1 disables the limit.
+	MaxSize int64
+}
+
+const defaultMaxSize = 4 << 20 // 4 MiB, corresponds to the default gRPC max request/response size
+
+// SizeTooLargeError is an error that is returned when the unmarshaler encounters a message size
+// that is larger than its configured [UnmarshalOptions.MaxSize].
+type SizeTooLargeError struct {
+	// Size is the varint size of the message encountered
+	// that was larger than the provided MaxSize.
+	Size uint64
+
+	// MaxSize is the MaxSize limit configured in UnmarshalOptions, which Size exceeded.
+	MaxSize uint64
+}
+
+func (e *SizeTooLargeError) Error() string {
+	return fmt.Sprintf("message size %d exceeded unmarshaler's maximum configured size %d", e.Size, e.MaxSize)
+}
+
+// Reader is the interface expected by [UnmarshalFrom].
+// It is implemented by *[bufio.Reader].
+type Reader interface {
+	io.Reader
+	io.ByteReader
+}
+
+// UnmarshalFrom parses and consumes a varint size-delimited wire-format message
+// from r.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
+//
+// The error is [io.EOF] only if no bytes are read.
+// If an EOF happens after reading some but not all the bytes,
+// UnmarshalFrom returns a non-io.EOF error.
+// In particular if r returns a non-io.EOF error, UnmarshalFrom returns it unchanged,
+// and if only a size is read with no subsequent message, [io.ErrUnexpectedEOF] is returned.
+func (o UnmarshalOptions) UnmarshalFrom(r Reader, m proto.Message) error {
+	var sizeArr [binary.MaxVarintLen64]byte
+	sizeBuf := sizeArr[:0]
+	for i := range sizeArr {
+		b, err := r.ReadByte()
+		if err != nil {
+			// Immediate EOF is unexpected.
+			if err == io.EOF && i != 0 {
+				break
+			}
+			return err
+		}
+		sizeBuf = append(sizeBuf, b)
+		if b < 0x80 {
+			break
+		}
+	}
+	size, n := protowire.ConsumeVarint(sizeBuf)
+	if n < 0 {
+		return protowire.ParseError(n)
+	}
+
+	maxSize := o.MaxSize
+	if maxSize == 0 {
+		maxSize = defaultMaxSize
+	}
+	if maxSize != -1 && size > uint64(maxSize) {
+		return errors.Wrap(&SizeTooLargeError{Size: size, MaxSize: uint64(maxSize)}, "")
+	}
+
+	var b []byte
+	var err error
+	if br, ok := r.(*bufio.Reader); ok {
+		// Use the []byte from the bufio.Reader instead of having to allocate one.
+		// This reduces CPU usage and allocated bytes.
+		b, err = br.Peek(int(size))
+		if err == nil {
+			defer br.Discard(int(size))
+		} else {
+			b = nil
+		}
+	}
+	if b == nil {
+		b = make([]byte, size)
+		_, err = io.ReadFull(r, b)
+	}
+
+	if err == io.EOF {
+		return io.ErrUnexpectedEOF
+	}
+	if err != nil {
+		return err
+	}
+	if err := o.Unmarshal(b, m); err != nil {
+		return err
+	}
+	return nil
+}
+
+// UnmarshalFrom parses and consumes a varint size-delimited wire-format message
+// from r with the default options.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
+//
+// See the documentation for [UnmarshalOptions.UnmarshalFrom].
+func UnmarshalFrom(r Reader, m proto.Message) error {
+	return UnmarshalOptions{}.UnmarshalFrom(r, m)
+}
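
The new protodelim package above writes each message prefixed with its varint-encoded length, which makes it straightforward to stream many messages over one writer and read them back one at a time. Below is a minimal round-trip sketch, not part of the vendored change: durationpb is used only as a convenient concrete proto.Message, and the 1 MiB MaxSize is an arbitrary illustrative cap.

package protodelimexample

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"time"

	"google.golang.org/protobuf/encoding/protodelim"
	"google.golang.org/protobuf/types/known/durationpb"
)

func roundTrip() error {
	var buf bytes.Buffer
	for _, d := range []time.Duration{time.Second, 3 * time.Minute} {
		// MarshalTo prefixes each message with its varint-encoded size.
		if _, err := protodelim.MarshalTo(&buf, durationpb.New(d)); err != nil {
			return err
		}
	}

	r := bufio.NewReader(&buf) // *bufio.Reader satisfies protodelim.Reader
	opts := protodelim.UnmarshalOptions{MaxSize: 1 << 20}
	for {
		msg := &durationpb.Duration{}
		err := opts.UnmarshalFrom(r, msg)
		if err == io.EOF { // io.EOF is returned only when no bytes were read at all
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Println(msg.AsDuration()) // 1s, then 3m0s
	}
}
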
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
index 5f28148d80..f47902371a 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
@@ -11,6 +11,7 @@ import (
 	"strconv"
 	"strings"
 
+	"google.golang.org/protobuf/encoding/protowire"
 	"google.golang.org/protobuf/internal/encoding/json"
 	"google.golang.org/protobuf/internal/encoding/messageset"
 	"google.golang.org/protobuf/internal/errors"
@@ -23,7 +24,7 @@ import (
 	"google.golang.org/protobuf/reflect/protoregistry"
 )
 
-// Unmarshal reads the given []byte into the given proto.Message.
+// Unmarshal reads the given []byte into the given [proto.Message].
 // The provided message must be mutable (e.g., a non-nil pointer to a message).
 func Unmarshal(b []byte, m proto.Message) error {
 	return UnmarshalOptions{}.Unmarshal(b, m)
@@ -37,7 +38,7 @@ type UnmarshalOptions struct {
 	// required fields will not return an error.
 	AllowPartial bool
 
-	// If DiscardUnknown is set, unknown fields are ignored.
+	// If DiscardUnknown is set, unknown fields and enum name values are ignored.
 	DiscardUnknown bool
 
 	// Resolver is used for looking up types when unmarshaling
@@ -47,9 +48,13 @@ type UnmarshalOptions struct {
 		protoregistry.MessageTypeResolver
 		protoregistry.ExtensionTypeResolver
 	}
+
+	// RecursionLimit limits how deeply messages may be nested.
+	// If zero, a default limit is applied.
+	RecursionLimit int
 }
 
-// Unmarshal reads the given []byte and populates the given proto.Message
+// Unmarshal reads the given []byte and populates the given [proto.Message]
 // using options in the UnmarshalOptions object.
 // It will clear the message first before setting the fields.
 // If it returns an error, the given message may be partially set.
@@ -67,6 +72,9 @@ func (o UnmarshalOptions) unmarshal(b []byte, m proto.Message) error {
 	if o.Resolver == nil {
 		o.Resolver = protoregistry.GlobalTypes
 	}
+	if o.RecursionLimit == 0 {
+		o.RecursionLimit = protowire.DefaultRecursionLimit
+	}
 
 	dec := decoder{json.NewDecoder(b), o}
 	if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil {
@@ -114,6 +122,10 @@ func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
 
 // unmarshalMessage unmarshals a message into the given protoreflect.Message.
 func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) error {
+	d.opts.RecursionLimit--
+	if d.opts.RecursionLimit < 0 {
+		return errors.New("exceeded max recursion depth")
+	}
 	if unmarshal := wellKnownTypeUnmarshaler(m.Descriptor().FullName()); unmarshal != nil {
 		return unmarshal(d, m)
 	}
@@ -266,7 +278,9 @@ func (d decoder) unmarshalSingular(m protoreflect.Message, fd protoreflect.Field
 	if err != nil {
 		return err
 	}
-	m.Set(fd, val)
+	if val.IsValid() {
+		m.Set(fd, val)
+	}
 	return nil
 }
 
@@ -329,7 +343,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.
 		}
 
 	case protoreflect.EnumKind:
-		if v, ok := unmarshalEnum(tok, fd); ok {
+		if v, ok := unmarshalEnum(tok, fd, d.opts.DiscardUnknown); ok {
 			return v, nil
 		}
 
@@ -474,7 +488,7 @@ func unmarshalBytes(tok json.Token) (protoreflect.Value, bool) {
 	return protoreflect.ValueOfBytes(b), true
 }
 
-func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflect.Value, bool) {
+func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor, discardUnknown bool) (protoreflect.Value, bool) {
 	switch tok.Kind() {
 	case json.String:
 		// Lookup EnumNumber based on name.
@@ -482,6 +496,9 @@ func unmarshalEnum(tok json.Token, fd protoreflect.FieldDescriptor) (protoreflec
 		if enumVal := fd.Enum().Values().ByName(protoreflect.Name(s)); enumVal != nil {
 			return protoreflect.ValueOfEnum(enumVal.Number()), true
 		}
+		if discardUnknown {
+			return protoreflect.Value{}, true
+		}
 
 	case json.Number:
 		if n, ok := tok.Int(32); ok {
@@ -542,7 +559,9 @@ func (d decoder) unmarshalList(list protoreflect.List, fd protoreflect.FieldDesc
 			if err != nil {
 				return err
 			}
-			list.Append(val)
+			if val.IsValid() {
+				list.Append(val)
+			}
 		}
 	}
 
@@ -609,8 +628,9 @@ Loop:
 		if err != nil {
 			return err
 		}
-
-		mmap.Set(pkey, pval)
+		if pval.IsValid() {
+			mmap.Set(pkey, pval)
+		}
 	}
 
 	return nil
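
With these decode.go changes, protojson.UnmarshalOptions gains a RecursionLimit knob and DiscardUnknown now also skips unknown enum names instead of failing. The sketch below shows one lenient-decoding setup; it is illustrative only: apipb.Api is just a convenient concrete message type, the limit of 100 is arbitrary, and the sample JSON is made up.

package protojsonexample

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/apipb"
)

func decodeLeniently() error {
	data := []byte(`{"name":"library.v1","notAKnownField":true}`)

	opts := protojson.UnmarshalOptions{
		DiscardUnknown: true, // ignore unknown fields (and unknown enum names) instead of erroring
		RecursionLimit: 100,  // reject JSON nested deeper than 100 messages; 0 keeps the package default
	}
	var api apipb.Api
	if err := opts.Unmarshal(data, &api); err != nil {
		return err
	}
	fmt.Println(api.GetName()) // "library.v1"
	return nil
}
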
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
index 21d5d2cb18..ae71007c18 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/doc.go
@@ -6,6 +6,6 @@
 // format. It follows the guide at
 // https://protobuf.dev/programming-guides/proto3#json.
 //
-// This package produces a different output than the standard "encoding/json"
+// This package produces a different output than the standard [encoding/json]
 // package, which does not operate correctly on protocol buffer messages.
 package protojson
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
index 66b95870e9..3f75098b6f 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
@@ -31,7 +31,7 @@ func Format(m proto.Message) string {
 	return MarshalOptions{Multiline: true}.Format(m)
 }
 
-// Marshal writes the given proto.Message in JSON format using default options.
+// Marshal writes the given [proto.Message] in JSON format using default options.
 // Do not depend on the output being stable. It may change over time across
 // different versions of the program.
 func Marshal(m proto.Message) ([]byte, error) {
@@ -81,6 +81,25 @@ type MarshalOptions struct {
 	//  ╚═══════╧════════════════════════════╝
 	EmitUnpopulated bool
 
+	// EmitDefaultValues specifies whether to emit default-valued primitive fields,
+	// empty lists, and empty maps. The fields affected are as follows:
+	//  ╔═══════╤════════════════════════════════════════╗
+	//  ║ JSON  │ Protobuf field                         ║
+	//  ╠═══════╪════════════════════════════════════════╣
+	//  ║ false │ non-optional scalar boolean fields     ║
+	//  ║ 0     │ non-optional scalar numeric fields     ║
+	//  ║ ""    │ non-optional scalar string/byte fields ║
+	//  ║ []    │ empty repeated fields                  ║
+	//  ║ {}    │ empty map fields                       ║
+	//  ╚═══════╧════════════════════════════════════════╝
+	//
+	// Behaves similarly to EmitUnpopulated, but does not emit "null"-value fields,
+	// i.e. presence-sensing fields that are omitted will remain omitted to preserve
+	// presence-sensing.
+	// EmitUnpopulated takes precedence over EmitDefaultValues since the former generates
+	// a strict superset of the latter.
+	EmitDefaultValues bool
+
 	// Resolver is used for looking up types when expanding google.protobuf.Any
 	// messages. If nil, this defaults to using protoregistry.GlobalTypes.
 	Resolver interface {
@@ -102,7 +121,7 @@ func (o MarshalOptions) Format(m proto.Message) string {
 	return string(b)
 }
 
-// Marshal marshals the given proto.Message in the JSON format using options in
+// Marshal marshals the given [proto.Message] in the JSON format using options in
 // MarshalOptions. Do not depend on the output being stable. It may change over
 // time across different versions of the program.
 func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
@@ -178,7 +197,11 @@ func (m typeURLFieldRanger) Range(f func(protoreflect.FieldDescriptor, protorefl
 
 // unpopulatedFieldRanger wraps a protoreflect.Message and modifies its Range
 // method to additionally iterate over unpopulated fields.
-type unpopulatedFieldRanger struct{ protoreflect.Message }
+type unpopulatedFieldRanger struct {
+	protoreflect.Message
+
+	skipNull bool
+}
 
 func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
 	fds := m.Descriptor().Fields()
@@ -192,6 +215,9 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto
 		isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid()
 		isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil
 		if isProto2Scalar || isSingularMessage {
+			if m.skipNull {
+				continue
+			}
 			v = protoreflect.Value{} // use invalid value to emit null
 		}
 		if !f(fd, v) {
@@ -217,8 +243,11 @@ func (e encoder) marshalMessage(m protoreflect.Message, typeURL string) error {
 	defer e.EndObject()
 
 	var fields order.FieldRanger = m
-	if e.opts.EmitUnpopulated {
-		fields = unpopulatedFieldRanger{m}
+	switch {
+	case e.opts.EmitUnpopulated:
+		fields = unpopulatedFieldRanger{Message: m, skipNull: false}
+	case e.opts.EmitDefaultValues:
+		fields = unpopulatedFieldRanger{Message: m, skipNull: true}
 	}
 	if typeURL != "" {
 		fields = typeURLFieldRanger{fields, typeURL}
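
The new EmitDefaultValues option sits between the default output and EmitUnpopulated: zero scalars, empty lists, and empty maps are written out, but presence-sensing fields (such as unset singular message fields) stay omitted rather than being emitted as null. A contrasting sketch follows; apipb.Api is chosen only because it has both kinds of fields, and the output described in the comment is the expected shape rather than a verified transcript.

package protojsonemitexample

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/apipb"
)

func emitDefaults() {
	msg := &apipb.Api{} // entirely unset message

	withNulls, _ := protojson.MarshalOptions{EmitUnpopulated: true}.Marshal(msg)
	defaultsOnly, _ := protojson.MarshalOptions{EmitDefaultValues: true}.Marshal(msg)

	// EmitUnpopulated is expected to include "sourceContext": null, while
	// EmitDefaultValues omits that field and still emits "", [], and the zero
	// enum value for the remaining unset fields.
	fmt.Println(string(withNulls))
	fmt.Println(string(defaultsOnly))
}
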
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
index 6c37d41744..25329b7692 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
@@ -176,7 +176,7 @@ func (d decoder) unmarshalAny(m protoreflect.Message) error {
 	// Use another decoder to parse the unread bytes for @type field. This
 	// avoids advancing a read from current decoder because the current JSON
 	// object may contain the fields of the embedded type.
-	dec := decoder{d.Clone(), UnmarshalOptions{}}
+	dec := decoder{d.Clone(), UnmarshalOptions{RecursionLimit: d.opts.RecursionLimit}}
 	tok, err := findTypeURL(dec)
 	switch err {
 	case errEmptyObject:
@@ -308,48 +308,25 @@ Loop:
 // array) in order to advance the read to the next JSON value. It relies on
 // the decoder returning an error if the types are not in valid sequence.
 func (d decoder) skipJSONValue() error {
-	tok, err := d.Read()
-	if err != nil {
-		return err
-	}
-	// Only need to continue reading for objects and arrays.
-	switch tok.Kind() {
-	case json.ObjectOpen:
-		for {
-			tok, err := d.Read()
-			if err != nil {
-				return err
-			}
-			switch tok.Kind() {
-			case json.ObjectClose:
-				return nil
-			case json.Name:
-				// Skip object field value.
-				if err := d.skipJSONValue(); err != nil {
-					return err
-				}
+	var open int
+	for {
+		tok, err := d.Read()
+		if err != nil {
+			return err
+		}
+		switch tok.Kind() {
+		case json.ObjectClose, json.ArrayClose:
+			open--
+		case json.ObjectOpen, json.ArrayOpen:
+			open++
+			if open > d.opts.RecursionLimit {
+				return errors.New("exceeded max recursion depth")
 			}
 		}
-
-	case json.ArrayOpen:
-		for {
-			tok, err := d.Peek()
-			if err != nil {
-				return err
-			}
-			switch tok.Kind() {
-			case json.ArrayClose:
-				d.Read()
-				return nil
-			default:
-				// Skip array item.
-				if err := d.skipJSONValue(); err != nil {
-					return err
-				}
-			}
+		if open == 0 {
+			return nil
 		}
 	}
-	return nil
 }
 
 // unmarshalAnyValue unmarshals the given custom-type message from the JSON
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
index 4921b2d4a7..a45f112bce 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
@@ -21,7 +21,7 @@ import (
 	"google.golang.org/protobuf/reflect/protoregistry"
 )
 
-// Unmarshal reads the given []byte into the given proto.Message.
+// Unmarshal reads the given []byte into the given [proto.Message].
 // The provided message must be mutable (e.g., a non-nil pointer to a message).
 func Unmarshal(b []byte, m proto.Message) error {
 	return UnmarshalOptions{}.Unmarshal(b, m)
@@ -51,7 +51,7 @@ type UnmarshalOptions struct {
 	}
 }
 
-// Unmarshal reads the given []byte and populates the given proto.Message
+// Unmarshal reads the given []byte and populates the given [proto.Message]
 // using options in the UnmarshalOptions object.
 // The provided message must be mutable (e.g., a non-nil pointer to a message).
 func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error {
@@ -739,7 +739,9 @@ func (d decoder) skipValue() error {
 			case text.ListClose:
 				return nil
 			case text.MessageOpen:
-				return d.skipMessageValue()
+				if err := d.skipMessageValue(); err != nil {
+					return err
+				}
 			default:
 				// Skip items. This will not validate whether skipped values are
 				// of the same type or not, same behavior as C++
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
index 722a7b41df..95967e8112 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
@@ -33,7 +33,7 @@ func Format(m proto.Message) string {
 	return MarshalOptions{Multiline: true}.Format(m)
 }
 
-// Marshal writes the given proto.Message in textproto format using default
+// Marshal writes the given [proto.Message] in textproto format using default
 // options. Do not depend on the output being stable. It may change over time
 // across different versions of the program.
 func Marshal(m proto.Message) ([]byte, error) {
@@ -97,7 +97,7 @@ func (o MarshalOptions) Format(m proto.Message) string {
 	return string(b)
 }
 
-// Marshal writes the given proto.Message in textproto format using options in
+// Marshal writes the given [proto.Message] in textproto format using options in
 // MarshalOptions object. Do not depend on the output being stable. It may
 // change over time across different versions of the program.
 func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
index f4b4686cf9..e942bc983e 100644
--- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
+++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
@@ -6,7 +6,7 @@
 // See https://protobuf.dev/programming-guides/encoding.
 //
 // For marshaling and unmarshaling entire protobuf messages,
-// use the "google.golang.org/protobuf/proto" package instead.
+// use the [google.golang.org/protobuf/proto] package instead.
 package protowire
 
 import (
@@ -87,7 +87,7 @@ func ParseError(n int) error {
 
 // ConsumeField parses an entire field record (both tag and value) and returns
 // the field number, the wire type, and the total length.
-// This returns a negative length upon an error (see ParseError).
+// This returns a negative length upon an error (see [ParseError]).
 //
 // The total length includes the tag header and the end group marker (if the
 // field is a group).
@@ -104,8 +104,8 @@ func ConsumeField(b []byte) (Number, Type, int) {
 }
 
 // ConsumeFieldValue parses a field value and returns its length.
-// This assumes that the field Number and wire Type have already been parsed.
-// This returns a negative length upon an error (see ParseError).
+// This assumes that the field [Number] and wire [Type] have already been parsed.
+// This returns a negative length upon an error (see [ParseError]).
 //
 // When parsing a group, the length includes the end group marker and
 // the end group is verified to match the starting field number.
@@ -164,7 +164,7 @@ func AppendTag(b []byte, num Number, typ Type) []byte {
 }
 
 // ConsumeTag parses b as a varint-encoded tag, reporting its length.
-// This returns a negative length upon an error (see ParseError).
+// This returns a negative length upon an error (see [ParseError]).
 func ConsumeTag(b []byte) (Number, Type, int) {
 	v, n := ConsumeVarint(b)
 	if n < 0 {
@@ -263,7 +263,7 @@ func AppendVarint(b []byte, v uint64) []byte {
 }
 
 // ConsumeVarint parses b as a varint-encoded uint64, reporting its length.
-// This returns a negative length upon an error (see ParseError).
+// This returns a negative length upon an error (see [ParseError]).
 func ConsumeVarint(b []byte) (v uint64, n int) {
 	var y uint64
 	if len(b) <= 0 {
@@ -384,7 +384,7 @@ func AppendFixed32(b []byte, v uint32) []byte {
 }
 
 // ConsumeFixed32 parses b as a little-endian uint32, reporting its length.
-// This returns a negative length upon an error (see ParseError).
+// This returns a negative length upon an error (see [ParseError]).
 func ConsumeFixed32(b []byte) (v uint32, n int) {
 	if len(b) < 4 {
 		return 0, errCodeTruncated
@@ -412,7 +412,7 @@ func AppendFixed64(b []byte, v uint64) []byte {
 }
 
 // ConsumeFixed64 parses b as a little-endian uint64, reporting its length.
-// This returns a negative length upon an error (see ParseError).
+// This returns a negative length upon an error (see [ParseError]).
 func ConsumeFixed64(b []byte) (v uint64, n int) {
 	if len(b) < 8 {
 		return 0, errCodeTruncated
@@ -432,7 +432,7 @@ func AppendBytes(b []byte, v []byte) []byte {
 }
 
 // ConsumeBytes parses b as a length-prefixed bytes value, reporting its length.
-// This returns a negative length upon an error (see ParseError).
+// This returns a negative length upon an error (see [ParseError]).
 func ConsumeBytes(b []byte) (v []byte, n int) {
 	m, n := ConsumeVarint(b)
 	if n < 0 {
@@ -456,7 +456,7 @@ func AppendString(b []byte, v string) []byte {
 }
 
 // ConsumeString parses b as a length-prefixed bytes value, reporting its length.
-// This returns a negative length upon an error (see ParseError).
+// This returns a negative length upon an error (see [ParseError]).
 func ConsumeString(b []byte) (v string, n int) {
 	bb, n := ConsumeBytes(b)
 	return string(bb), n
@@ -471,7 +471,7 @@ func AppendGroup(b []byte, num Number, v []byte) []byte {
 // ConsumeGroup parses b as a group value until the trailing end group marker,
 // and verifies that the end marker matches the provided num. The value v
 // does not contain the end marker, while the length does contain the end marker.
-// This returns a negative length upon an error (see ParseError).
+// This returns a negative length upon an error (see [ParseError]).
 func ConsumeGroup(num Number, b []byte) (v []byte, n int) {
 	n = ConsumeFieldValue(num, StartGroupType, b)
 	if n < 0 {
@@ -495,8 +495,8 @@ func SizeGroup(num Number, n int) int {
 	return n + SizeTag(num)
 }
 
-// DecodeTag decodes the field Number and wire Type from its unified form.
-// The Number is -1 if the decoded field number overflows int32.
+// DecodeTag decodes the field [Number] and wire [Type] from its unified form.
+// The [Number] is -1 if the decoded field number overflows int32.
 // Other than overflow, this does not check for field number validity.
 func DecodeTag(x uint64) (Number, Type) {
 	// NOTE: MessageSet allows for larger field numbers than normal.
@@ -506,7 +506,7 @@ func DecodeTag(x uint64) (Number, Type) {
 	return Number(x >> 3), Type(x & 7)
 }
 
-// EncodeTag encodes the field Number and wire Type into its unified form.
+// EncodeTag encodes the field [Number] and wire [Type] into its unified form.
 func EncodeTag(num Number, typ Type) uint64 {
 	return uint64(num)<<3 | uint64(typ&7)
 }
diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
index db5248e1b5..a45625c8d1 100644
--- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
+++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
@@ -83,7 +83,13 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string {
 	case protoreflect.FileImports:
 		for i := 0; i < vs.Len(); i++ {
 			var rs records
-			rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak")
+			rv := reflect.ValueOf(vs.Get(i))
+			rs.Append(rv, []methodAndName{
+				{rv.MethodByName("Path"), "Path"},
+				{rv.MethodByName("Package"), "Package"},
+				{rv.MethodByName("IsPublic"), "IsPublic"},
+				{rv.MethodByName("IsWeak"), "IsWeak"},
+			}...)
 			ss = append(ss, "{"+rs.Join()+"}")
 		}
 		return start + joinStrings(ss, allowMulti) + end
@@ -92,34 +98,26 @@ func formatListOpt(vs list, isRoot, allowMulti bool) string {
 		for i := 0; i < vs.Len(); i++ {
 			m := reflect.ValueOf(vs).MethodByName("Get")
 			v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface()
-			ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue))
+			ss = append(ss, formatDescOpt(v.(protoreflect.Descriptor), false, allowMulti && !isEnumValue, nil))
 		}
 		return start + joinStrings(ss, allowMulti && isEnumValue) + end
 	}
 }
 
-// descriptorAccessors is a list of accessors to print for each descriptor.
-//
-// Do not print all accessors since some contain redundant information,
-// while others are pointers that we do not want to follow since the descriptor
-// is actually a cyclic graph.
-//
-// Using a list allows us to print the accessors in a sensible order.
-var descriptorAccessors = map[reflect.Type][]string{
-	reflect.TypeOf((*protoreflect.FileDescriptor)(nil)).Elem():      {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"},
-	reflect.TypeOf((*protoreflect.MessageDescriptor)(nil)).Elem():   {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"},
-	reflect.TypeOf((*protoreflect.FieldDescriptor)(nil)).Elem():     {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"},
-	reflect.TypeOf((*protoreflect.OneofDescriptor)(nil)).Elem():     {"Fields"}, // not directly used; must keep in sync with formatDescOpt
-	reflect.TypeOf((*protoreflect.EnumDescriptor)(nil)).Elem():      {"Values", "ReservedNames", "ReservedRanges"},
-	reflect.TypeOf((*protoreflect.EnumValueDescriptor)(nil)).Elem(): {"Number"},
-	reflect.TypeOf((*protoreflect.ServiceDescriptor)(nil)).Elem():   {"Methods"},
-	reflect.TypeOf((*protoreflect.MethodDescriptor)(nil)).Elem():    {"Input", "Output", "IsStreamingClient", "IsStreamingServer"},
+type methodAndName struct {
+	method reflect.Value
+	name   string
 }
 
 func FormatDesc(s fmt.State, r rune, t protoreflect.Descriptor) {
-	io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#'))))
+	io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')), nil))
 }
-func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string {
+
+func InternalFormatDescOptForTesting(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string {
+	return formatDescOpt(t, isRoot, allowMulti, record)
+}
+
+func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool, record func(string)) string {
 	rv := reflect.ValueOf(t)
 	rt := rv.MethodByName("ProtoType").Type().In(0)
 
@@ -129,26 +127,60 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string {
 	}
 
 	_, isFile := t.(protoreflect.FileDescriptor)
-	rs := records{allowMulti: allowMulti}
+	rs := records{
+		allowMulti: allowMulti,
+		record:     record,
+	}
 	if t.IsPlaceholder() {
 		if isFile {
-			rs.Append(rv, "Path", "Package", "IsPlaceholder")
+			rs.Append(rv, []methodAndName{
+				{rv.MethodByName("Path"), "Path"},
+				{rv.MethodByName("Package"), "Package"},
+				{rv.MethodByName("IsPlaceholder"), "IsPlaceholder"},
+			}...)
 		} else {
-			rs.Append(rv, "FullName", "IsPlaceholder")
+			rs.Append(rv, []methodAndName{
+				{rv.MethodByName("FullName"), "FullName"},
+				{rv.MethodByName("IsPlaceholder"), "IsPlaceholder"},
+			}...)
 		}
 	} else {
 		switch {
 		case isFile:
-			rs.Append(rv, "Syntax")
+			rs.Append(rv, methodAndName{rv.MethodByName("Syntax"), "Syntax"})
 		case isRoot:
-			rs.Append(rv, "Syntax", "FullName")
+			rs.Append(rv, []methodAndName{
+				{rv.MethodByName("Syntax"), "Syntax"},
+				{rv.MethodByName("FullName"), "FullName"},
+			}...)
 		default:
-			rs.Append(rv, "Name")
+			rs.Append(rv, methodAndName{rv.MethodByName("Name"), "Name"})
 		}
 		switch t := t.(type) {
 		case protoreflect.FieldDescriptor:
-			for _, s := range descriptorAccessors[rt] {
-				switch s {
+			accessors := []methodAndName{
+				{rv.MethodByName("Number"), "Number"},
+				{rv.MethodByName("Cardinality"), "Cardinality"},
+				{rv.MethodByName("Kind"), "Kind"},
+				{rv.MethodByName("HasJSONName"), "HasJSONName"},
+				{rv.MethodByName("JSONName"), "JSONName"},
+				{rv.MethodByName("HasPresence"), "HasPresence"},
+				{rv.MethodByName("IsExtension"), "IsExtension"},
+				{rv.MethodByName("IsPacked"), "IsPacked"},
+				{rv.MethodByName("IsWeak"), "IsWeak"},
+				{rv.MethodByName("IsList"), "IsList"},
+				{rv.MethodByName("IsMap"), "IsMap"},
+				{rv.MethodByName("MapKey"), "MapKey"},
+				{rv.MethodByName("MapValue"), "MapValue"},
+				{rv.MethodByName("HasDefault"), "HasDefault"},
+				{rv.MethodByName("Default"), "Default"},
+				{rv.MethodByName("ContainingOneof"), "ContainingOneof"},
+				{rv.MethodByName("ContainingMessage"), "ContainingMessage"},
+				{rv.MethodByName("Message"), "Message"},
+				{rv.MethodByName("Enum"), "Enum"},
+			}
+			for _, s := range accessors {
+				switch s.name {
 				case "MapKey":
 					if k := t.MapKey(); k != nil {
 						rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()})
@@ -157,20 +189,20 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string {
 					if v := t.MapValue(); v != nil {
 						switch v.Kind() {
 						case protoreflect.EnumKind:
-							rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())})
+							rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Enum().FullName())})
 						case protoreflect.MessageKind, protoreflect.GroupKind:
-							rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())})
+							rs.AppendRecs("MapValue", [2]string{"MapValue", string(v.Message().FullName())})
 						default:
-							rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()})
+							rs.AppendRecs("MapValue", [2]string{"MapValue", v.Kind().String()})
 						}
 					}
 				case "ContainingOneof":
 					if od := t.ContainingOneof(); od != nil {
-						rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())})
+						rs.AppendRecs("ContainingOneof", [2]string{"Oneof", string(od.Name())})
 					}
 				case "ContainingMessage":
 					if t.IsExtension() {
-						rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())})
+						rs.AppendRecs("ContainingMessage", [2]string{"Extendee", string(t.ContainingMessage().FullName())})
 					}
 				case "Message":
 					if !t.IsMap() {
@@ -187,13 +219,61 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string {
 				ss = append(ss, string(fs.Get(i).Name()))
 			}
 			if len(ss) > 0 {
-				rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"})
+				rs.AppendRecs("Fields", [2]string{"Fields", "[" + joinStrings(ss, false) + "]"})
 			}
-		default:
-			rs.Append(rv, descriptorAccessors[rt]...)
+
+		case protoreflect.FileDescriptor:
+			rs.Append(rv, []methodAndName{
+				{rv.MethodByName("Path"), "Path"},
+				{rv.MethodByName("Package"), "Package"},
+				{rv.MethodByName("Imports"), "Imports"},
+				{rv.MethodByName("Messages"), "Messages"},
+				{rv.MethodByName("Enums"), "Enums"},
+				{rv.MethodByName("Extensions"), "Extensions"},
+				{rv.MethodByName("Services"), "Services"},
+			}...)
+
+		case protoreflect.MessageDescriptor:
+			rs.Append(rv, []methodAndName{
+				{rv.MethodByName("IsMapEntry"), "IsMapEntry"},
+				{rv.MethodByName("Fields"), "Fields"},
+				{rv.MethodByName("Oneofs"), "Oneofs"},
+				{rv.MethodByName("ReservedNames"), "ReservedNames"},
+				{rv.MethodByName("ReservedRanges"), "ReservedRanges"},
+				{rv.MethodByName("RequiredNumbers"), "RequiredNumbers"},
+				{rv.MethodByName("ExtensionRanges"), "ExtensionRanges"},
+				{rv.MethodByName("Messages"), "Messages"},
+				{rv.MethodByName("Enums"), "Enums"},
+				{rv.MethodByName("Extensions"), "Extensions"},
+			}...)
+
+		case protoreflect.EnumDescriptor:
+			rs.Append(rv, []methodAndName{
+				{rv.MethodByName("Values"), "Values"},
+				{rv.MethodByName("ReservedNames"), "ReservedNames"},
+				{rv.MethodByName("ReservedRanges"), "ReservedRanges"},
+			}...)
+
+		case protoreflect.EnumValueDescriptor:
+			rs.Append(rv, []methodAndName{
+				{rv.MethodByName("Number"), "Number"},
+			}...)
+
+		case protoreflect.ServiceDescriptor:
+			rs.Append(rv, []methodAndName{
+				{rv.MethodByName("Methods"), "Methods"},
+			}...)
+
+		case protoreflect.MethodDescriptor:
+			rs.Append(rv, []methodAndName{
+				{rv.MethodByName("Input"), "Input"},
+				{rv.MethodByName("Output"), "Output"},
+				{rv.MethodByName("IsStreamingClient"), "IsStreamingClient"},
+				{rv.MethodByName("IsStreamingServer"), "IsStreamingServer"},
+			}...)
 		}
-		if rv.MethodByName("GoType").IsValid() {
-			rs.Append(rv, "GoType")
+		if m := rv.MethodByName("GoType"); m.IsValid() {
+			rs.Append(rv, methodAndName{m, "GoType"})
 		}
 	}
 	return start + rs.Join() + end
@@ -202,19 +282,34 @@ func formatDescOpt(t protoreflect.Descriptor, isRoot, allowMulti bool) string {
 type records struct {
 	recs       [][2]string
 	allowMulti bool
+
+	// record is a function that will be called for every Append() or
+	// AppendRecs() call, to be used for testing with the
+	// InternalFormatDescOptForTesting function.
+	record func(string)
 }
 
-func (rs *records) Append(v reflect.Value, accessors ...string) {
+func (rs *records) AppendRecs(fieldName string, newRecs [2]string) {
+	if rs.record != nil {
+		rs.record(fieldName)
+	}
+	rs.recs = append(rs.recs, newRecs)
+}
+
+func (rs *records) Append(v reflect.Value, accessors ...methodAndName) {
 	for _, a := range accessors {
+		if rs.record != nil {
+			rs.record(a.name)
+		}
 		var rv reflect.Value
-		if m := v.MethodByName(a); m.IsValid() {
-			rv = m.Call(nil)[0]
+		if a.method.IsValid() {
+			rv = a.method.Call(nil)[0]
 		}
 		if v.Kind() == reflect.Struct && !rv.IsValid() {
-			rv = v.FieldByName(a)
+			rv = v.FieldByName(a.name)
 		}
 		if !rv.IsValid() {
-			panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a))
+			panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a.name))
 		}
 		if _, ok := rv.Interface().(protoreflect.Value); ok {
 			rv = rv.MethodByName("Interface").Call(nil)[0]
@@ -261,7 +356,7 @@ func (rs *records) Append(v reflect.Value, accessors ...string) {
 		default:
 			s = fmt.Sprint(v)
 		}
-		rs.recs = append(rs.recs, [2]string{a, s})
+		rs.recs = append(rs.recs, [2]string{a.name, s})
 	}
 }
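
The change above replaces the per-call accessor-name strings with precomputed methodAndName pairs, so reflect.Value.MethodByName is only ever invoked with constant string literals (apparently to keep the Go linker's dead-code elimination effective for importers), and the optional record callback lets tests observe which accessors get formatted. Below is a minimal, self-contained sketch of the same caching pattern; the Descriptor type and its methods are made-up stand-ins, not the real protoreflect types.

	// Sketch of the accessor-caching pattern: resolve each reflect method
	// value once, keep it next to its name, and call the cached
	// reflect.Value instead of repeating MethodByName lookups.
	package main

	import (
		"fmt"
		"reflect"
	)

	type Descriptor struct{ name string }

	func (d Descriptor) Name() string { return d.name }
	func (d Descriptor) Kind() string { return "message" }

	type methodAndName struct {
		method reflect.Value
		name   string
	}

	func main() {
		d := Descriptor{name: "Example"}
		rv := reflect.ValueOf(d)

		// Resolve the methods once, with constant names.
		accessors := []methodAndName{
			{rv.MethodByName("Name"), "Name"},
			{rv.MethodByName("Kind"), "Kind"},
		}

		// Reuse the cached reflect.Value on every call.
		for _, a := range accessors {
			out := a.method.Call(nil)[0]
			fmt.Printf("%s: %v\n", a.name, out)
		}
	}
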
 
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index 7c3689baee..193c68e8f9 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -21,11 +21,26 @@ import (
 	"google.golang.org/protobuf/reflect/protoregistry"
 )
 
+// Edition is an Enum for proto2.Edition
+type Edition int32
+
+// These values align with the value of Enum in descriptor.proto which allows
+// direct conversion between the proto enum and this enum.
+const (
+	EditionUnknown     Edition = 0
+	EditionProto2      Edition = 998
+	EditionProto3      Edition = 999
+	Edition2023        Edition = 1000
+	EditionUnsupported Edition = 100000
+)
+
 // The types in this file may have a suffix:
 //	• L0: Contains fields common to all descriptors (except File) and
 //	must be initialized up front.
 //	• L1: Contains fields specific to a descriptor and
-//	must be initialized up front.
+//	must be initialized up front. If the associated proto uses Editions, the
+//	Editions features must always be resolved. If not explicitly set, the
+//	appropriate default must be resolved and set.
 //	• L2: Contains fields that are lazily initialized when constructing
 //	from the raw file descriptor. When constructing as a literal, the L2
 //	fields must be initialized up front.
@@ -44,6 +59,7 @@ type (
 	}
 	FileL1 struct {
 		Syntax  protoreflect.Syntax
+		Edition Edition // Only used if Syntax == Editions
 		Path    string
 		Package protoreflect.FullName
 
@@ -51,12 +67,35 @@ type (
 		Messages   Messages
 		Extensions Extensions
 		Services   Services
+
+		EditionFeatures FileEditionFeatures
 	}
 	FileL2 struct {
 		Options   func() protoreflect.ProtoMessage
 		Imports   FileImports
 		Locations SourceLocations
 	}
+
+	FileEditionFeatures struct {
+		// IsFieldPresence is true if field_presence is EXPLICIT
+		// https://protobuf.dev/editions/features/#field_presence
+		IsFieldPresence bool
+		// IsOpenEnum is true if enum_type is OPEN
+		// https://protobuf.dev/editions/features/#enum_type
+		IsOpenEnum bool
+		// IsPacked is true if repeated_field_encoding is PACKED
+		// https://protobuf.dev/editions/features/#repeated_field_encoding
+		IsPacked bool
+		// IsUTF8Validated is true if utf8_validation is VERIFY
+		// https://protobuf.dev/editions/features/#utf8_validation
+		IsUTF8Validated bool
+		// IsDelimitedEncoded is true if message_encoding is DELIMITED
+		// https://protobuf.dev/editions/features/#message_encoding
+		IsDelimitedEncoded bool
+		// IsJSONCompliant is true if json_format is ALLOW
+		// https://protobuf.dev/editions/features/#json_format
+		IsJSONCompliant bool
+	}
 )
 
 func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd }
@@ -210,6 +249,9 @@ type (
 		ContainingOneof  protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
 		Enum             protoreflect.EnumDescriptor
 		Message          protoreflect.MessageDescriptor
+
+		// Edition features.
+		Presence bool
 	}
 
 	Oneof struct {
@@ -273,6 +315,9 @@ func (fd *Field) HasJSONName() bool                     { return fd.L1.StringNam
 func (fd *Field) JSONName() string                      { return fd.L1.StringName.getJSON(fd) }
 func (fd *Field) TextName() string                      { return fd.L1.StringName.getText(fd) }
 func (fd *Field) HasPresence() bool {
+	if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions {
+		return fd.L1.Presence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil
+	}
 	return fd.L1.Cardinality != protoreflect.Repeated && (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil)
 }
 func (fd *Field) HasOptionalKeyword() bool {
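
Under Editions, HasPresence above no longer keys off the syntax alone: the resolved field_presence feature is cached as the Presence bit in FieldL1, and message-typed or oneof member fields keep presence regardless. The following is a standalone sketch of that decision rule using simplified stand-in types rather than the real filedesc structs.

	// Simplified model of the presence rule encoded in Field.HasPresence.
	package main

	import "fmt"

	type syntax int

	const (
		proto2 syntax = iota
		proto3
		editions
	)

	type field struct {
		syntax          syntax
		repeated        bool
		explicitFeature bool // Editions: resolved field_presence == EXPLICIT
		isMessage       bool
		inOneof         bool
	}

	func hasPresence(f field) bool {
		if f.syntax == editions {
			return f.explicitFeature || f.isMessage || f.inOneof
		}
		return !f.repeated && (f.syntax == proto2 || f.isMessage || f.inOneof)
	}

	func main() {
		fmt.Println(hasPresence(field{syntax: editions, explicitFeature: true})) // true
		fmt.Println(hasPresence(field{syntax: proto3}))                          // false
		fmt.Println(hasPresence(field{syntax: proto3, inOneof: true}))           // true
	}
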
diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
index 136f1b2157..8f94230ea1 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
@@ -12,6 +12,12 @@ import (
 
 const File_google_protobuf_descriptor_proto = "google/protobuf/descriptor.proto"
 
+// Full and short names for google.protobuf.Edition.
+const (
+	Edition_enum_fullname = "google.protobuf.Edition"
+	Edition_enum_name     = "Edition"
+)
+
 // Names for google.protobuf.FileDescriptorSet.
 const (
 	FileDescriptorSet_message_name     protoreflect.Name     = "FileDescriptorSet"
@@ -81,7 +87,7 @@ const (
 	FileDescriptorProto_Options_field_number          protoreflect.FieldNumber = 8
 	FileDescriptorProto_SourceCodeInfo_field_number   protoreflect.FieldNumber = 9
 	FileDescriptorProto_Syntax_field_number           protoreflect.FieldNumber = 12
-	FileDescriptorProto_Edition_field_number          protoreflect.FieldNumber = 13
+	FileDescriptorProto_Edition_field_number          protoreflect.FieldNumber = 14
 )
 
 // Names for google.protobuf.DescriptorProto.
@@ -184,10 +190,12 @@ const (
 const (
 	ExtensionRangeOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
 	ExtensionRangeOptions_Declaration_field_name         protoreflect.Name = "declaration"
+	ExtensionRangeOptions_Features_field_name            protoreflect.Name = "features"
 	ExtensionRangeOptions_Verification_field_name        protoreflect.Name = "verification"
 
 	ExtensionRangeOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.uninterpreted_option"
 	ExtensionRangeOptions_Declaration_field_fullname         protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.declaration"
+	ExtensionRangeOptions_Features_field_fullname            protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.features"
 	ExtensionRangeOptions_Verification_field_fullname        protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.verification"
 )
 
@@ -195,6 +203,7 @@ const (
 const (
 	ExtensionRangeOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
 	ExtensionRangeOptions_Declaration_field_number         protoreflect.FieldNumber = 2
+	ExtensionRangeOptions_Features_field_number            protoreflect.FieldNumber = 50
 	ExtensionRangeOptions_Verification_field_number        protoreflect.FieldNumber = 3
 )
 
@@ -212,29 +221,26 @@ const (
 
 // Field names for google.protobuf.ExtensionRangeOptions.Declaration.
 const (
-	ExtensionRangeOptions_Declaration_Number_field_name     protoreflect.Name = "number"
-	ExtensionRangeOptions_Declaration_FullName_field_name   protoreflect.Name = "full_name"
-	ExtensionRangeOptions_Declaration_Type_field_name       protoreflect.Name = "type"
-	ExtensionRangeOptions_Declaration_IsRepeated_field_name protoreflect.Name = "is_repeated"
-	ExtensionRangeOptions_Declaration_Reserved_field_name   protoreflect.Name = "reserved"
-	ExtensionRangeOptions_Declaration_Repeated_field_name   protoreflect.Name = "repeated"
+	ExtensionRangeOptions_Declaration_Number_field_name   protoreflect.Name = "number"
+	ExtensionRangeOptions_Declaration_FullName_field_name protoreflect.Name = "full_name"
+	ExtensionRangeOptions_Declaration_Type_field_name     protoreflect.Name = "type"
+	ExtensionRangeOptions_Declaration_Reserved_field_name protoreflect.Name = "reserved"
+	ExtensionRangeOptions_Declaration_Repeated_field_name protoreflect.Name = "repeated"
 
-	ExtensionRangeOptions_Declaration_Number_field_fullname     protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number"
-	ExtensionRangeOptions_Declaration_FullName_field_fullname   protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name"
-	ExtensionRangeOptions_Declaration_Type_field_fullname       protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type"
-	ExtensionRangeOptions_Declaration_IsRepeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.is_repeated"
-	ExtensionRangeOptions_Declaration_Reserved_field_fullname   protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved"
-	ExtensionRangeOptions_Declaration_Repeated_field_fullname   protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated"
+	ExtensionRangeOptions_Declaration_Number_field_fullname   protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.number"
+	ExtensionRangeOptions_Declaration_FullName_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.full_name"
+	ExtensionRangeOptions_Declaration_Type_field_fullname     protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.type"
+	ExtensionRangeOptions_Declaration_Reserved_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.reserved"
+	ExtensionRangeOptions_Declaration_Repeated_field_fullname protoreflect.FullName = "google.protobuf.ExtensionRangeOptions.Declaration.repeated"
 )
 
 // Field numbers for google.protobuf.ExtensionRangeOptions.Declaration.
 const (
-	ExtensionRangeOptions_Declaration_Number_field_number     protoreflect.FieldNumber = 1
-	ExtensionRangeOptions_Declaration_FullName_field_number   protoreflect.FieldNumber = 2
-	ExtensionRangeOptions_Declaration_Type_field_number       protoreflect.FieldNumber = 3
-	ExtensionRangeOptions_Declaration_IsRepeated_field_number protoreflect.FieldNumber = 4
-	ExtensionRangeOptions_Declaration_Reserved_field_number   protoreflect.FieldNumber = 5
-	ExtensionRangeOptions_Declaration_Repeated_field_number   protoreflect.FieldNumber = 6
+	ExtensionRangeOptions_Declaration_Number_field_number   protoreflect.FieldNumber = 1
+	ExtensionRangeOptions_Declaration_FullName_field_number protoreflect.FieldNumber = 2
+	ExtensionRangeOptions_Declaration_Type_field_number     protoreflect.FieldNumber = 3
+	ExtensionRangeOptions_Declaration_Reserved_field_number protoreflect.FieldNumber = 5
+	ExtensionRangeOptions_Declaration_Repeated_field_number protoreflect.FieldNumber = 6
 )
 
 // Names for google.protobuf.FieldDescriptorProto.
@@ -478,6 +484,7 @@ const (
 	FileOptions_PhpNamespace_field_name              protoreflect.Name = "php_namespace"
 	FileOptions_PhpMetadataNamespace_field_name      protoreflect.Name = "php_metadata_namespace"
 	FileOptions_RubyPackage_field_name               protoreflect.Name = "ruby_package"
+	FileOptions_Features_field_name                  protoreflect.Name = "features"
 	FileOptions_UninterpretedOption_field_name       protoreflect.Name = "uninterpreted_option"
 
 	FileOptions_JavaPackage_field_fullname               protoreflect.FullName = "google.protobuf.FileOptions.java_package"
@@ -500,6 +507,7 @@ const (
 	FileOptions_PhpNamespace_field_fullname              protoreflect.FullName = "google.protobuf.FileOptions.php_namespace"
 	FileOptions_PhpMetadataNamespace_field_fullname      protoreflect.FullName = "google.protobuf.FileOptions.php_metadata_namespace"
 	FileOptions_RubyPackage_field_fullname               protoreflect.FullName = "google.protobuf.FileOptions.ruby_package"
+	FileOptions_Features_field_fullname                  protoreflect.FullName = "google.protobuf.FileOptions.features"
 	FileOptions_UninterpretedOption_field_fullname       protoreflect.FullName = "google.protobuf.FileOptions.uninterpreted_option"
 )
 
@@ -525,6 +533,7 @@ const (
 	FileOptions_PhpNamespace_field_number              protoreflect.FieldNumber = 41
 	FileOptions_PhpMetadataNamespace_field_number      protoreflect.FieldNumber = 44
 	FileOptions_RubyPackage_field_number               protoreflect.FieldNumber = 45
+	FileOptions_Features_field_number                  protoreflect.FieldNumber = 50
 	FileOptions_UninterpretedOption_field_number       protoreflect.FieldNumber = 999
 )
 
@@ -547,6 +556,7 @@ const (
 	MessageOptions_Deprecated_field_name                         protoreflect.Name = "deprecated"
 	MessageOptions_MapEntry_field_name                           protoreflect.Name = "map_entry"
 	MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts"
+	MessageOptions_Features_field_name                           protoreflect.Name = "features"
 	MessageOptions_UninterpretedOption_field_name                protoreflect.Name = "uninterpreted_option"
 
 	MessageOptions_MessageSetWireFormat_field_fullname               protoreflect.FullName = "google.protobuf.MessageOptions.message_set_wire_format"
@@ -554,6 +564,7 @@ const (
 	MessageOptions_Deprecated_field_fullname                         protoreflect.FullName = "google.protobuf.MessageOptions.deprecated"
 	MessageOptions_MapEntry_field_fullname                           protoreflect.FullName = "google.protobuf.MessageOptions.map_entry"
 	MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.MessageOptions.deprecated_legacy_json_field_conflicts"
+	MessageOptions_Features_field_fullname                           protoreflect.FullName = "google.protobuf.MessageOptions.features"
 	MessageOptions_UninterpretedOption_field_fullname                protoreflect.FullName = "google.protobuf.MessageOptions.uninterpreted_option"
 )
 
@@ -564,6 +575,7 @@ const (
 	MessageOptions_Deprecated_field_number                         protoreflect.FieldNumber = 3
 	MessageOptions_MapEntry_field_number                           protoreflect.FieldNumber = 7
 	MessageOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 11
+	MessageOptions_Features_field_number                           protoreflect.FieldNumber = 12
 	MessageOptions_UninterpretedOption_field_number                protoreflect.FieldNumber = 999
 )
 
@@ -584,8 +596,9 @@ const (
 	FieldOptions_Weak_field_name                protoreflect.Name = "weak"
 	FieldOptions_DebugRedact_field_name         protoreflect.Name = "debug_redact"
 	FieldOptions_Retention_field_name           protoreflect.Name = "retention"
-	FieldOptions_Target_field_name              protoreflect.Name = "target"
 	FieldOptions_Targets_field_name             protoreflect.Name = "targets"
+	FieldOptions_EditionDefaults_field_name     protoreflect.Name = "edition_defaults"
+	FieldOptions_Features_field_name            protoreflect.Name = "features"
 	FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
 
 	FieldOptions_Ctype_field_fullname               protoreflect.FullName = "google.protobuf.FieldOptions.ctype"
@@ -597,8 +610,9 @@ const (
 	FieldOptions_Weak_field_fullname                protoreflect.FullName = "google.protobuf.FieldOptions.weak"
 	FieldOptions_DebugRedact_field_fullname         protoreflect.FullName = "google.protobuf.FieldOptions.debug_redact"
 	FieldOptions_Retention_field_fullname           protoreflect.FullName = "google.protobuf.FieldOptions.retention"
-	FieldOptions_Target_field_fullname              protoreflect.FullName = "google.protobuf.FieldOptions.target"
 	FieldOptions_Targets_field_fullname             protoreflect.FullName = "google.protobuf.FieldOptions.targets"
+	FieldOptions_EditionDefaults_field_fullname     protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults"
+	FieldOptions_Features_field_fullname            protoreflect.FullName = "google.protobuf.FieldOptions.features"
 	FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option"
 )
 
@@ -613,8 +627,9 @@ const (
 	FieldOptions_Weak_field_number                protoreflect.FieldNumber = 10
 	FieldOptions_DebugRedact_field_number         protoreflect.FieldNumber = 16
 	FieldOptions_Retention_field_number           protoreflect.FieldNumber = 17
-	FieldOptions_Target_field_number              protoreflect.FieldNumber = 18
 	FieldOptions_Targets_field_number             protoreflect.FieldNumber = 19
+	FieldOptions_EditionDefaults_field_number     protoreflect.FieldNumber = 20
+	FieldOptions_Features_field_number            protoreflect.FieldNumber = 21
 	FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
 )
 
@@ -642,6 +657,27 @@ const (
 	FieldOptions_OptionTargetType_enum_name     = "OptionTargetType"
 )
 
+// Names for google.protobuf.FieldOptions.EditionDefault.
+const (
+	FieldOptions_EditionDefault_message_name     protoreflect.Name     = "EditionDefault"
+	FieldOptions_EditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault"
+)
+
+// Field names for google.protobuf.FieldOptions.EditionDefault.
+const (
+	FieldOptions_EditionDefault_Edition_field_name protoreflect.Name = "edition"
+	FieldOptions_EditionDefault_Value_field_name   protoreflect.Name = "value"
+
+	FieldOptions_EditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.edition"
+	FieldOptions_EditionDefault_Value_field_fullname   protoreflect.FullName = "google.protobuf.FieldOptions.EditionDefault.value"
+)
+
+// Field numbers for google.protobuf.FieldOptions.EditionDefault.
+const (
+	FieldOptions_EditionDefault_Edition_field_number protoreflect.FieldNumber = 3
+	FieldOptions_EditionDefault_Value_field_number   protoreflect.FieldNumber = 2
+)
+
 // Names for google.protobuf.OneofOptions.
 const (
 	OneofOptions_message_name     protoreflect.Name     = "OneofOptions"
@@ -650,13 +686,16 @@ const (
 
 // Field names for google.protobuf.OneofOptions.
 const (
+	OneofOptions_Features_field_name            protoreflect.Name = "features"
 	OneofOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
 
+	OneofOptions_Features_field_fullname            protoreflect.FullName = "google.protobuf.OneofOptions.features"
 	OneofOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.OneofOptions.uninterpreted_option"
 )
 
 // Field numbers for google.protobuf.OneofOptions.
 const (
+	OneofOptions_Features_field_number            protoreflect.FieldNumber = 1
 	OneofOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
 )
 
@@ -671,11 +710,13 @@ const (
 	EnumOptions_AllowAlias_field_name                         protoreflect.Name = "allow_alias"
 	EnumOptions_Deprecated_field_name                         protoreflect.Name = "deprecated"
 	EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_name protoreflect.Name = "deprecated_legacy_json_field_conflicts"
+	EnumOptions_Features_field_name                           protoreflect.Name = "features"
 	EnumOptions_UninterpretedOption_field_name                protoreflect.Name = "uninterpreted_option"
 
 	EnumOptions_AllowAlias_field_fullname                         protoreflect.FullName = "google.protobuf.EnumOptions.allow_alias"
 	EnumOptions_Deprecated_field_fullname                         protoreflect.FullName = "google.protobuf.EnumOptions.deprecated"
 	EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_fullname protoreflect.FullName = "google.protobuf.EnumOptions.deprecated_legacy_json_field_conflicts"
+	EnumOptions_Features_field_fullname                           protoreflect.FullName = "google.protobuf.EnumOptions.features"
 	EnumOptions_UninterpretedOption_field_fullname                protoreflect.FullName = "google.protobuf.EnumOptions.uninterpreted_option"
 )
 
@@ -684,6 +725,7 @@ const (
 	EnumOptions_AllowAlias_field_number                         protoreflect.FieldNumber = 2
 	EnumOptions_Deprecated_field_number                         protoreflect.FieldNumber = 3
 	EnumOptions_DeprecatedLegacyJsonFieldConflicts_field_number protoreflect.FieldNumber = 6
+	EnumOptions_Features_field_number                           protoreflect.FieldNumber = 7
 	EnumOptions_UninterpretedOption_field_number                protoreflect.FieldNumber = 999
 )
 
@@ -696,15 +738,21 @@ const (
 // Field names for google.protobuf.EnumValueOptions.
 const (
 	EnumValueOptions_Deprecated_field_name          protoreflect.Name = "deprecated"
+	EnumValueOptions_Features_field_name            protoreflect.Name = "features"
+	EnumValueOptions_DebugRedact_field_name         protoreflect.Name = "debug_redact"
 	EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
 
 	EnumValueOptions_Deprecated_field_fullname          protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated"
+	EnumValueOptions_Features_field_fullname            protoreflect.FullName = "google.protobuf.EnumValueOptions.features"
+	EnumValueOptions_DebugRedact_field_fullname         protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact"
 	EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option"
 )
 
 // Field numbers for google.protobuf.EnumValueOptions.
 const (
 	EnumValueOptions_Deprecated_field_number          protoreflect.FieldNumber = 1
+	EnumValueOptions_Features_field_number            protoreflect.FieldNumber = 2
+	EnumValueOptions_DebugRedact_field_number         protoreflect.FieldNumber = 3
 	EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
 )
 
@@ -716,15 +764,18 @@ const (
 
 // Field names for google.protobuf.ServiceOptions.
 const (
+	ServiceOptions_Features_field_name            protoreflect.Name = "features"
 	ServiceOptions_Deprecated_field_name          protoreflect.Name = "deprecated"
 	ServiceOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
 
+	ServiceOptions_Features_field_fullname            protoreflect.FullName = "google.protobuf.ServiceOptions.features"
 	ServiceOptions_Deprecated_field_fullname          protoreflect.FullName = "google.protobuf.ServiceOptions.deprecated"
 	ServiceOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.ServiceOptions.uninterpreted_option"
 )
 
 // Field numbers for google.protobuf.ServiceOptions.
 const (
+	ServiceOptions_Features_field_number            protoreflect.FieldNumber = 34
 	ServiceOptions_Deprecated_field_number          protoreflect.FieldNumber = 33
 	ServiceOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
 )
@@ -739,10 +790,12 @@ const (
 const (
 	MethodOptions_Deprecated_field_name          protoreflect.Name = "deprecated"
 	MethodOptions_IdempotencyLevel_field_name    protoreflect.Name = "idempotency_level"
+	MethodOptions_Features_field_name            protoreflect.Name = "features"
 	MethodOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
 
 	MethodOptions_Deprecated_field_fullname          protoreflect.FullName = "google.protobuf.MethodOptions.deprecated"
 	MethodOptions_IdempotencyLevel_field_fullname    protoreflect.FullName = "google.protobuf.MethodOptions.idempotency_level"
+	MethodOptions_Features_field_fullname            protoreflect.FullName = "google.protobuf.MethodOptions.features"
 	MethodOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.MethodOptions.uninterpreted_option"
 )
 
@@ -750,6 +803,7 @@ const (
 const (
 	MethodOptions_Deprecated_field_number          protoreflect.FieldNumber = 33
 	MethodOptions_IdempotencyLevel_field_number    protoreflect.FieldNumber = 34
+	MethodOptions_Features_field_number            protoreflect.FieldNumber = 35
 	MethodOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
 )
 
@@ -816,6 +870,120 @@ const (
 	UninterpretedOption_NamePart_IsExtension_field_number protoreflect.FieldNumber = 2
 )
 
+// Names for google.protobuf.FeatureSet.
+const (
+	FeatureSet_message_name     protoreflect.Name     = "FeatureSet"
+	FeatureSet_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet"
+)
+
+// Field names for google.protobuf.FeatureSet.
+const (
+	FeatureSet_FieldPresence_field_name         protoreflect.Name = "field_presence"
+	FeatureSet_EnumType_field_name              protoreflect.Name = "enum_type"
+	FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding"
+	FeatureSet_Utf8Validation_field_name        protoreflect.Name = "utf8_validation"
+	FeatureSet_MessageEncoding_field_name       protoreflect.Name = "message_encoding"
+	FeatureSet_JsonFormat_field_name            protoreflect.Name = "json_format"
+
+	FeatureSet_FieldPresence_field_fullname         protoreflect.FullName = "google.protobuf.FeatureSet.field_presence"
+	FeatureSet_EnumType_field_fullname              protoreflect.FullName = "google.protobuf.FeatureSet.enum_type"
+	FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding"
+	FeatureSet_Utf8Validation_field_fullname        protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation"
+	FeatureSet_MessageEncoding_field_fullname       protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding"
+	FeatureSet_JsonFormat_field_fullname            protoreflect.FullName = "google.protobuf.FeatureSet.json_format"
+)
+
+// Field numbers for google.protobuf.FeatureSet.
+const (
+	FeatureSet_FieldPresence_field_number         protoreflect.FieldNumber = 1
+	FeatureSet_EnumType_field_number              protoreflect.FieldNumber = 2
+	FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3
+	FeatureSet_Utf8Validation_field_number        protoreflect.FieldNumber = 4
+	FeatureSet_MessageEncoding_field_number       protoreflect.FieldNumber = 5
+	FeatureSet_JsonFormat_field_number            protoreflect.FieldNumber = 6
+)
+
+// Full and short names for google.protobuf.FeatureSet.FieldPresence.
+const (
+	FeatureSet_FieldPresence_enum_fullname = "google.protobuf.FeatureSet.FieldPresence"
+	FeatureSet_FieldPresence_enum_name     = "FieldPresence"
+)
+
+// Full and short names for google.protobuf.FeatureSet.EnumType.
+const (
+	FeatureSet_EnumType_enum_fullname = "google.protobuf.FeatureSet.EnumType"
+	FeatureSet_EnumType_enum_name     = "EnumType"
+)
+
+// Full and short names for google.protobuf.FeatureSet.RepeatedFieldEncoding.
+const (
+	FeatureSet_RepeatedFieldEncoding_enum_fullname = "google.protobuf.FeatureSet.RepeatedFieldEncoding"
+	FeatureSet_RepeatedFieldEncoding_enum_name     = "RepeatedFieldEncoding"
+)
+
+// Full and short names for google.protobuf.FeatureSet.Utf8Validation.
+const (
+	FeatureSet_Utf8Validation_enum_fullname = "google.protobuf.FeatureSet.Utf8Validation"
+	FeatureSet_Utf8Validation_enum_name     = "Utf8Validation"
+)
+
+// Full and short names for google.protobuf.FeatureSet.MessageEncoding.
+const (
+	FeatureSet_MessageEncoding_enum_fullname = "google.protobuf.FeatureSet.MessageEncoding"
+	FeatureSet_MessageEncoding_enum_name     = "MessageEncoding"
+)
+
+// Full and short names for google.protobuf.FeatureSet.JsonFormat.
+const (
+	FeatureSet_JsonFormat_enum_fullname = "google.protobuf.FeatureSet.JsonFormat"
+	FeatureSet_JsonFormat_enum_name     = "JsonFormat"
+)
+
+// Names for google.protobuf.FeatureSetDefaults.
+const (
+	FeatureSetDefaults_message_name     protoreflect.Name     = "FeatureSetDefaults"
+	FeatureSetDefaults_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults"
+)
+
+// Field names for google.protobuf.FeatureSetDefaults.
+const (
+	FeatureSetDefaults_Defaults_field_name       protoreflect.Name = "defaults"
+	FeatureSetDefaults_MinimumEdition_field_name protoreflect.Name = "minimum_edition"
+	FeatureSetDefaults_MaximumEdition_field_name protoreflect.Name = "maximum_edition"
+
+	FeatureSetDefaults_Defaults_field_fullname       protoreflect.FullName = "google.protobuf.FeatureSetDefaults.defaults"
+	FeatureSetDefaults_MinimumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.minimum_edition"
+	FeatureSetDefaults_MaximumEdition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.maximum_edition"
+)
+
+// Field numbers for google.protobuf.FeatureSetDefaults.
+const (
+	FeatureSetDefaults_Defaults_field_number       protoreflect.FieldNumber = 1
+	FeatureSetDefaults_MinimumEdition_field_number protoreflect.FieldNumber = 4
+	FeatureSetDefaults_MaximumEdition_field_number protoreflect.FieldNumber = 5
+)
+
+// Names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
+const (
+	FeatureSetDefaults_FeatureSetEditionDefault_message_name     protoreflect.Name     = "FeatureSetEditionDefault"
+	FeatureSetDefaults_FeatureSetEditionDefault_message_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault"
+)
+
+// Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
+const (
+	FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name  protoreflect.Name = "edition"
+	FeatureSetDefaults_FeatureSetEditionDefault_Features_field_name protoreflect.Name = "features"
+
+	FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname  protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition"
+	FeatureSetDefaults_FeatureSetEditionDefault_Features_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features"
+)
+
+// Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
+const (
+	FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number  protoreflect.FieldNumber = 3
+	FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number protoreflect.FieldNumber = 2
+)
+
 // Names for google.protobuf.SourceCodeInfo.
 const (
 	SourceCodeInfo_message_name     protoreflect.Name     = "SourceCodeInfo"
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go
index 1a509b63eb..f55dc01e3a 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go
@@ -162,11 +162,20 @@ func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions
 func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.BoolSlice()
 	if wtyp == protowire.BytesType {
-		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
 			return out, errDecode
 		}
+		count := 0
+		for _, v := range b {
+			if v < 0x80 {
+				count++
+			}
+		}
+		if count > 0 {
+			p.growBoolSlice(count)
+		}
+		s := *sp
 		for len(b) > 0 {
 			var v uint64
 			var n int
@@ -732,11 +741,20 @@ func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption
 func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Int32Slice()
 	if wtyp == protowire.BytesType {
-		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
 			return out, errDecode
 		}
+		count := 0
+		for _, v := range b {
+			if v < 0x80 {
+				count++
+			}
+		}
+		if count > 0 {
+			p.growInt32Slice(count)
+		}
+		s := *sp
 		for len(b) > 0 {
 			var v uint64
 			var n int
@@ -1138,11 +1156,20 @@ func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio
 func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Int32Slice()
 	if wtyp == protowire.BytesType {
-		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
 			return out, errDecode
 		}
+		count := 0
+		for _, v := range b {
+			if v < 0x80 {
+				count++
+			}
+		}
+		if count > 0 {
+			p.growInt32Slice(count)
+		}
+		s := *sp
 		for len(b) > 0 {
 			var v uint64
 			var n int
@@ -1544,11 +1571,20 @@ func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio
 func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Uint32Slice()
 	if wtyp == protowire.BytesType {
-		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
 			return out, errDecode
 		}
+		count := 0
+		for _, v := range b {
+			if v < 0x80 {
+				count++
+			}
+		}
+		if count > 0 {
+			p.growUint32Slice(count)
+		}
+		s := *sp
 		for len(b) > 0 {
 			var v uint64
 			var n int
@@ -1950,11 +1986,20 @@ func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption
 func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Int64Slice()
 	if wtyp == protowire.BytesType {
-		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
 			return out, errDecode
 		}
+		count := 0
+		for _, v := range b {
+			if v < 0x80 {
+				count++
+			}
+		}
+		if count > 0 {
+			p.growInt64Slice(count)
+		}
+		s := *sp
 		for len(b) > 0 {
 			var v uint64
 			var n int
@@ -2356,11 +2401,20 @@ func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio
 func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Int64Slice()
 	if wtyp == protowire.BytesType {
-		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
 			return out, errDecode
 		}
+		count := 0
+		for _, v := range b {
+			if v < 0x80 {
+				count++
+			}
+		}
+		if count > 0 {
+			p.growInt64Slice(count)
+		}
+		s := *sp
 		for len(b) > 0 {
 			var v uint64
 			var n int
@@ -2762,11 +2816,20 @@ func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio
 func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Uint64Slice()
 	if wtyp == protowire.BytesType {
-		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
 			return out, errDecode
 		}
+		count := 0
+		for _, v := range b {
+			if v < 0x80 {
+				count++
+			}
+		}
+		if count > 0 {
+			p.growUint64Slice(count)
+		}
+		s := *sp
 		for len(b) > 0 {
 			var v uint64
 			var n int
@@ -3145,11 +3208,15 @@ func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt
 func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Int32Slice()
 	if wtyp == protowire.BytesType {
-		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
 			return out, errDecode
 		}
+		count := len(b) / protowire.SizeFixed32()
+		if count > 0 {
+			p.growInt32Slice(count)
+		}
+		s := *sp
 		for len(b) > 0 {
 			v, n := protowire.ConsumeFixed32(b)
 			if n < 0 {
@@ -3461,11 +3528,15 @@ func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti
 func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Uint32Slice()
 	if wtyp == protowire.BytesType {
-		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
 			return out, errDecode
 		}
+		count := len(b) / protowire.SizeFixed32()
+		if count > 0 {
+			p.growUint32Slice(count)
+		}
+		s := *sp
 		for len(b) > 0 {
 			v, n := protowire.ConsumeFixed32(b)
 			if n < 0 {
@@ -3777,11 +3848,15 @@ func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOption
 func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Float32Slice()
 	if wtyp == protowire.BytesType {
-		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
 			return out, errDecode
 		}
+		count := len(b) / protowire.SizeFixed32()
+		if count > 0 {
+			p.growFloat32Slice(count)
+		}
+		s := *sp
 		for len(b) > 0 {
 			v, n := protowire.ConsumeFixed32(b)
 			if n < 0 {
@@ -4093,11 +4168,15 @@ func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpt
 func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Int64Slice()
 	if wtyp == protowire.BytesType {
-		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
 			return out, errDecode
 		}
+		count := len(b) / protowire.SizeFixed64()
+		if count > 0 {
+			p.growInt64Slice(count)
+		}
+		s := *sp
 		for len(b) > 0 {
 			v, n := protowire.ConsumeFixed64(b)
 			if n < 0 {
@@ -4409,11 +4488,15 @@ func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, opts marshalOpti
 func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Uint64Slice()
 	if wtyp == protowire.BytesType {
-		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
 			return out, errDecode
 		}
+		count := len(b) / protowire.SizeFixed64()
+		if count > 0 {
+			p.growUint64Slice(count)
+		}
+		s := *sp
 		for len(b) > 0 {
 			v, n := protowire.ConsumeFixed64(b)
 			if n < 0 {
@@ -4725,11 +4808,15 @@ func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptio
 func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
 	sp := p.Float64Slice()
 	if wtyp == protowire.BytesType {
-		s := *sp
 		b, n := protowire.ConsumeBytes(b)
 		if n < 0 {
 			return out, errDecode
 		}
+		count := len(b) / protowire.SizeFixed64()
+		if count > 0 {
+			p.growFloat64Slice(count)
+		}
+		s := *sp
 		for len(b) > 0 {
 			v, n := protowire.ConsumeFixed64(b)
 			if n < 0 {
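
Every packed decoder above now pre-sizes its destination before the decode loop: varint payloads are sized by counting bytes with the continuation bit clear (each varint ends with exactly one such byte), fixed-width payloads by dividing the payload length by the element size, and the matching grow helper is called once. A tiny standalone illustration of the varint-counting idea, with a hand-built payload rather than real protobuf input:

	// Counting packed varint elements before decoding, so the destination
	// slice is grown exactly once.
	package main

	import "fmt"

	func main() {
		// Packed varints for the values 1, 300, 2 (300 encodes as 0xAC 0x02).
		payload := []byte{0x01, 0xAC, 0x02, 0x02}

		// One element per byte with the continuation bit (0x80) clear.
		count := 0
		for _, v := range payload {
			if v < 0x80 {
				count++
			}
		}

		// Preallocate instead of reallocating inside the loop.
		out := make([]uint64, 0, count)

		// Decode each varint (little-endian base-128).
		var v uint64
		var shift uint
		for _, b := range payload {
			v |= uint64(b&0x7f) << shift
			if b < 0x80 {
				out = append(out, v)
				v, shift = 0, 0
			} else {
				shift += 7
			}
		}
		fmt.Println(count, out) // 3 [1 300 2]
	}
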
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
index 61c483fac0..2ab2c62978 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
@@ -206,13 +206,18 @@ func aberrantLoadMessageDescReentrant(t reflect.Type, name protoreflect.FullName
 
 	// Obtain a list of oneof wrapper types.
 	var oneofWrappers []reflect.Type
-	for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} {
-		if fn, ok := t.MethodByName(method); ok {
-			for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
-				if vs, ok := v.Interface().([]interface{}); ok {
-					for _, v := range vs {
-						oneofWrappers = append(oneofWrappers, reflect.TypeOf(v))
-					}
+	methods := make([]reflect.Method, 0, 2)
+	if m, ok := t.MethodByName("XXX_OneofFuncs"); ok {
+		methods = append(methods, m)
+	}
+	if m, ok := t.MethodByName("XXX_OneofWrappers"); ok {
+		methods = append(methods, m)
+	}
+	for _, fn := range methods {
+		for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
+			if vs, ok := v.Interface().([]interface{}); ok {
+				for _, v := range vs {
+					oneofWrappers = append(oneofWrappers, reflect.TypeOf(v))
 				}
 			}
 		}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go
index 4f5fb67a0d..629bacdced 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message.go
@@ -192,12 +192,17 @@ fieldLoop:
 
 	// Derive a mapping of oneof wrappers to fields.
 	oneofWrappers := mi.OneofWrappers
-	for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} {
-		if fn, ok := reflect.PtrTo(t).MethodByName(method); ok {
-			for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
-				if vs, ok := v.Interface().([]interface{}); ok {
-					oneofWrappers = vs
-				}
+	methods := make([]reflect.Method, 0, 2)
+	if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
+		methods = append(methods, m)
+	}
+	if m, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
+		methods = append(methods, m)
+	}
+	for _, fn := range methods {
+		for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
+			if vs, ok := v.Interface().([]interface{}); ok {
+				oneofWrappers = vs
 			}
 		}
 	}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
index 4c491bdf48..517e94434c 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
@@ -159,6 +159,42 @@ func (p pointer) SetPointer(v pointer) {
 	p.v.Elem().Set(v.v)
 }
 
+func growSlice(p pointer, addCap int) {
+	// TODO: Once we only support Go 1.20 and newer, use reflect.Grow.
+	in := p.v.Elem()
+	out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap)
+	reflect.Copy(out, in)
+	p.v.Elem().Set(out)
+}
+
+func (p pointer) growBoolSlice(addCap int) {
+	growSlice(p, addCap)
+}
+
+func (p pointer) growInt32Slice(addCap int) {
+	growSlice(p, addCap)
+}
+
+func (p pointer) growUint32Slice(addCap int) {
+	growSlice(p, addCap)
+}
+
+func (p pointer) growInt64Slice(addCap int) {
+	growSlice(p, addCap)
+}
+
+func (p pointer) growUint64Slice(addCap int) {
+	growSlice(p, addCap)
+}
+
+func (p pointer) growFloat64Slice(addCap int) {
+	growSlice(p, addCap)
+}
+
+func (p pointer) growFloat32Slice(addCap int) {
+	growSlice(p, addCap)
+}
+
 func (Export) MessageStateOf(p Pointer) *messageState     { panic("not supported") }
 func (ms *messageState) pointer() pointer                 { panic("not supported") }
 func (ms *messageState) messageInfo() *MessageInfo        { panic("not supported") }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
index ee0e0573e3..4b020e3116 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
@@ -138,6 +138,46 @@ func (p pointer) SetPointer(v pointer) {
 	*(*unsafe.Pointer)(p.p) = (unsafe.Pointer)(v.p)
 }
 
+func (p pointer) growBoolSlice(addCap int) {
+	sp := p.BoolSlice()
+	s := make([]bool, 0, addCap+len(*sp))
+	s = s[:len(*sp)]
+	copy(s, *sp)
+	*sp = s
+}
+
+func (p pointer) growInt32Slice(addCap int) {
+	sp := p.Int32Slice()
+	s := make([]int32, 0, addCap+len(*sp))
+	s = s[:len(*sp)]
+	copy(s, *sp)
+	*sp = s
+}
+
+func (p pointer) growUint32Slice(addCap int) {
+	p.growInt32Slice(addCap)
+}
+
+func (p pointer) growFloat32Slice(addCap int) {
+	p.growInt32Slice(addCap)
+}
+
+func (p pointer) growInt64Slice(addCap int) {
+	sp := p.Int64Slice()
+	s := make([]int64, 0, addCap+len(*sp))
+	s = s[:len(*sp)]
+	copy(s, *sp)
+	*sp = s
+}
+
+func (p pointer) growUint64Slice(addCap int) {
+	p.growInt64Slice(addCap)
+}
+
+func (p pointer) growFloat64Slice(addCap int) {
+	p.growInt64Slice(addCap)
+}
+
 // Static check that MessageState does not exceed the size of a pointer.
 const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{}))
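
Both pointer implementations gain grow helpers that extend capacity without changing length: the purego/appengine variant goes through reflect.MakeSlice and reflect.Copy, while the unsafe variant above allocates a larger backing array with make and copies the existing elements, sharing one helper per element width (uint32 and float32 reuse the int32 path, and so on). A minimal sketch of the make-and-copy version:

	// Grow capacity only: length stays the same, existing elements are
	// copied into the new, larger backing array.
	package main

	import "fmt"

	func growInt32Slice(sp *[]int32, addCap int) {
		s := make([]int32, 0, addCap+len(*sp))
		s = s[:len(*sp)]
		copy(s, *sp)
		*sp = s
	}

	func main() {
		s := []int32{1, 2, 3}
		fmt.Println(len(s), cap(s)) // 3 3
		growInt32Slice(&s, 10)
		fmt.Println(len(s), cap(s), s) // 3 13 [1 2 3]
	}
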
 
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
similarity index 96%
rename from vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
rename to vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
index 61a84d3418..a008acd090 100644
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !purego && !appengine
-// +build !purego,!appengine
+//go:build !purego && !appengine && !go1.21
+// +build !purego,!appengine,!go1.21
 
 package strs
 
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
new file mode 100644
index 0000000000..60166f2ba3
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
@@ -0,0 +1,74 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !purego && !appengine && go1.21
+// +build !purego,!appengine,go1.21
+
+package strs
+
+import (
+	"unsafe"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// UnsafeString returns an unsafe string reference of b.
+// The caller must treat the input slice as immutable.
+//
+// WARNING: Use carefully. The returned result must not leak to the end user
+// unless the input slice is provably immutable.
+func UnsafeString(b []byte) string {
+	return unsafe.String(unsafe.SliceData(b), len(b))
+}
+
+// UnsafeBytes returns an unsafe byte slice reference of s.
+// The caller must treat the returned slice as immutable.
+//
+// WARNING: Use carefully. The returned result must not leak to the end user.
+func UnsafeBytes(s string) []byte {
+	return unsafe.Slice(unsafe.StringData(s), len(s))
+}
+
+// Builder builds a set of strings with shared lifetime.
+// This differs from strings.Builder, which is for building a single string.
+type Builder struct {
+	buf []byte
+}
+
+// AppendFullName is equivalent to protoreflect.FullName.Append,
+// but optimized for large batches where each name has a shared lifetime.
+func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName {
+	n := len(prefix) + len(".") + len(name)
+	if len(prefix) == 0 {
+		n -= len(".")
+	}
+	sb.grow(n)
+	sb.buf = append(sb.buf, prefix...)
+	sb.buf = append(sb.buf, '.')
+	sb.buf = append(sb.buf, name...)
+	return protoreflect.FullName(sb.last(n))
+}
+
+// MakeString is equivalent to string(b), but optimized for large batches
+// with a shared lifetime.
+func (sb *Builder) MakeString(b []byte) string {
+	sb.grow(len(b))
+	sb.buf = append(sb.buf, b...)
+	return sb.last(len(b))
+}
+
+func (sb *Builder) grow(n int) {
+	if cap(sb.buf)-len(sb.buf) >= n {
+		return
+	}
+
+	// Unlike strings.Builder, we do not need to copy over the contents
+	// of the old buffer since our builder provides no API for
+	// retrieving previously created strings.
+	sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n))
+}
+
+func (sb *Builder) last(n int) string {
+	return UnsafeString(sb.buf[len(sb.buf)-n:])
+}
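
The new go1.21 file swaps the older header-based conversions for the unsafe.String/unsafe.SliceData and unsafe.Slice/unsafe.StringData pairs available since Go 1.20, with build tags keeping the previous implementation for older toolchains. A short standalone round-trip showing the same zero-copy conversions (a sketch outside this package, not part of the vendored code):

	// Zero-copy string<->[]byte conversions with the Go 1.20+ unsafe helpers.
	// The byte slice must not be mutated after conversion.
	package main

	import (
		"fmt"
		"unsafe"
	)

	func unsafeString(b []byte) string {
		return unsafe.String(unsafe.SliceData(b), len(b))
	}

	func unsafeBytes(s string) []byte {
		return unsafe.Slice(unsafe.StringData(s), len(s))
	}

	func main() {
		b := []byte("hello")
		s := unsafeString(b) // shares b's backing array; treat b as immutable now
		fmt.Println(s, len(unsafeBytes(s)))
	}
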
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index 0999f29d50..d8f48faffa 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -51,7 +51,7 @@ import (
 //  10. Send out the CL for review and submit it.
 const (
 	Major      = 1
-	Minor      = 31
+	Minor      = 32
 	Patch      = 0
 	PreRelease = ""
 )
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
index 48d47946bb..e5b03b5677 100644
--- a/vendor/google.golang.org/protobuf/proto/decode.go
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -69,7 +69,7 @@ func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error {
 // UnmarshalState parses a wire-format message and places the result in m.
 //
 // This method permits fine-grained control over the unmarshaler.
-// Most users should use Unmarshal instead.
+// Most users should use [Unmarshal] instead.
 func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) {
 	if o.RecursionLimit == 0 {
 		o.RecursionLimit = protowire.DefaultRecursionLimit
diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go
index ec71e717fe..80ed16a0c2 100644
--- a/vendor/google.golang.org/protobuf/proto/doc.go
+++ b/vendor/google.golang.org/protobuf/proto/doc.go
@@ -18,27 +18,27 @@
 // This package contains functions to convert to and from the wire format,
 // an efficient binary serialization of protocol buffers.
 //
-// • Size reports the size of a message in the wire format.
+//   - [Size] reports the size of a message in the wire format.
 //
-// • Marshal converts a message to the wire format.
-// The MarshalOptions type provides more control over wire marshaling.
+//   - [Marshal] converts a message to the wire format.
+//     The [MarshalOptions] type provides more control over wire marshaling.
 //
-// • Unmarshal converts a message from the wire format.
-// The UnmarshalOptions type provides more control over wire unmarshaling.
+//   - [Unmarshal] converts a message from the wire format.
+//     The [UnmarshalOptions] type provides more control over wire unmarshaling.
 //
 // # Basic message operations
 //
-// • Clone makes a deep copy of a message.
+//   - [Clone] makes a deep copy of a message.
 //
-// • Merge merges the content of a message into another.
+//   - [Merge] merges the content of a message into another.
 //
-// • Equal compares two messages. For more control over comparisons
-// and detailed reporting of differences, see package
-// "google.golang.org/protobuf/testing/protocmp".
+//   - [Equal] compares two messages. For more control over comparisons
+//     and detailed reporting of differences, see package
+//     [google.golang.org/protobuf/testing/protocmp].
 //
-// • Reset clears the content of a message.
+//   - [Reset] clears the content of a message.
 //
-// • CheckInitialized reports whether all required fields in a message are set.
+//   - [CheckInitialized] reports whether all required fields in a message are set.
 //
 // # Optional scalar constructors
 //
@@ -46,9 +46,9 @@
 // as pointers to a value. For example, an optional string field has the
 // Go type *string.
 //
-// • Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, and String
-// take a value and return a pointer to a new instance of it,
-// to simplify construction of optional field values.
+//   - [Bool], [Int32], [Int64], [Uint32], [Uint64], [Float32], [Float64], and [String]
+//     take a value and return a pointer to a new instance of it,
+//     to simplify construction of optional field values.
 //
 // Generated enum types usually have an Enum method which performs the
 // same operation.
@@ -57,29 +57,29 @@
 //
 // # Extension accessors
 //
-// • HasExtension, GetExtension, SetExtension, and ClearExtension
-// access extension field values in a protocol buffer message.
+//   - [HasExtension], [GetExtension], [SetExtension], and [ClearExtension]
+//     access extension field values in a protocol buffer message.
 //
 // Extension fields are only supported in proto2.
 //
 // # Related packages
 //
-// • Package "google.golang.org/protobuf/encoding/protojson" converts messages to
-// and from JSON.
+//   - Package [google.golang.org/protobuf/encoding/protojson] converts messages to
+//     and from JSON.
 //
-// • Package "google.golang.org/protobuf/encoding/prototext" converts messages to
-// and from the text format.
+//   - Package [google.golang.org/protobuf/encoding/prototext] converts messages to
+//     and from the text format.
 //
-// • Package "google.golang.org/protobuf/reflect/protoreflect" provides a
-// reflection interface for protocol buffer data types.
+//   - Package [google.golang.org/protobuf/reflect/protoreflect] provides a
+//     reflection interface for protocol buffer data types.
 //
-// • Package "google.golang.org/protobuf/testing/protocmp" provides features
-// to compare protocol buffer messages with the "github.com/google/go-cmp/cmp"
-// package.
+//   - Package [google.golang.org/protobuf/testing/protocmp] provides features
+//     to compare protocol buffer messages with the [github.com/google/go-cmp/cmp]
+//     package.
 //
-// • Package "google.golang.org/protobuf/types/dynamicpb" provides a dynamic
-// message type, suitable for working with messages where the protocol buffer
-// type is only known at runtime.
+//   - Package [google.golang.org/protobuf/types/dynamicpb] provides a dynamic
+//     message type, suitable for working with messages where the protocol buffer
+//     type is only known at runtime.
 //
 // This module contains additional packages for more specialized use cases.
 // Consult the individual package documentation for details.
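For orientation, a minimal sketch of how the wire-format and basic message operations listed above fit together. It uses the wrapperspb well-known type only so the snippet runs without a user-generated message; any generated message behaves the same way:

	package main

	import (
		"fmt"
		"log"

		"google.golang.org/protobuf/proto"
		"google.golang.org/protobuf/types/known/wrapperspb"
	)

	func main() {
		in := wrapperspb.String("hello")

		// Marshal converts the message to the wire format.
		b, err := proto.Marshal(in)
		if err != nil {
			log.Fatal(err)
		}

		// Unmarshal parses the wire format back into a message.
		out := &wrapperspb.StringValue{}
		if err := proto.Unmarshal(b, out); err != nil {
			log.Fatal(err)
		}

		// Clone makes a deep copy; Equal compares two messages.
		cp := proto.Clone(out)
		fmt.Println(proto.Equal(in, out), proto.Equal(out, cp)) // true true
	}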
diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go
index bf7f816d0e..4fed202f9f 100644
--- a/vendor/google.golang.org/protobuf/proto/encode.go
+++ b/vendor/google.golang.org/protobuf/proto/encode.go
@@ -129,7 +129,7 @@ func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) {
 // MarshalState returns the wire-format encoding of a message.
 //
 // This method permits fine-grained control over the marshaler.
-// Most users should use Marshal instead.
+// Most users should use [Marshal] instead.
 func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) {
 	return o.marshal(in.Buf, in.Message)
 }
diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go
index 5f293cda86..17899a3a76 100644
--- a/vendor/google.golang.org/protobuf/proto/extension.go
+++ b/vendor/google.golang.org/protobuf/proto/extension.go
@@ -26,7 +26,7 @@ func HasExtension(m Message, xt protoreflect.ExtensionType) bool {
 }
 
 // ClearExtension clears an extension field such that subsequent
-// HasExtension calls return false.
+// [HasExtension] calls return false.
 // It panics if m is invalid or if xt does not extend m.
 func ClearExtension(m Message, xt protoreflect.ExtensionType) {
 	m.ProtoReflect().Clear(xt.TypeDescriptor())
diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go
index d761ab331d..3c6fe57807 100644
--- a/vendor/google.golang.org/protobuf/proto/merge.go
+++ b/vendor/google.golang.org/protobuf/proto/merge.go
@@ -21,7 +21,7 @@ import (
 // The unknown fields of src are appended to the unknown fields of dst.
 //
 // It is semantically equivalent to unmarshaling the encoded form of src
-// into dst with the UnmarshalOptions.Merge option specified.
+// into dst with the [UnmarshalOptions.Merge] option specified.
 func Merge(dst, src Message) {
 	// TODO: Should nil src be treated as semantically equivalent to an
 	// untyped, read-only, empty message? What about a nil dst?
diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go
index 1f0d183b10..7543ee6b25 100644
--- a/vendor/google.golang.org/protobuf/proto/proto.go
+++ b/vendor/google.golang.org/protobuf/proto/proto.go
@@ -15,18 +15,20 @@ import (
 // protobuf module that accept a Message, except where otherwise specified.
 //
 // This is the v2 interface definition for protobuf messages.
-// The v1 interface definition is "github.com/golang/protobuf/proto".Message.
+// The v1 interface definition is [github.com/golang/protobuf/proto.Message].
 //
-// To convert a v1 message to a v2 message,
-// use "github.com/golang/protobuf/proto".MessageV2.
-// To convert a v2 message to a v1 message,
-// use "github.com/golang/protobuf/proto".MessageV1.
+//   - To convert a v1 message to a v2 message,
+//     use [google.golang.org/protobuf/protoadapt.MessageV2Of].
+//   - To convert a v2 message to a v1 message,
+//     use [google.golang.org/protobuf/protoadapt.MessageV1Of].
 type Message = protoreflect.ProtoMessage
 
-// Error matches all errors produced by packages in the protobuf module.
+// Error matches all errors produced by packages in the protobuf module
+// according to [errors.Is].
 //
-// That is, errors.Is(err, Error) reports whether an error is produced
-// by this module.
+// Example usage:
+//
+//	if errors.Is(err, proto.Error) { ... }
 var Error error
 
 func init() {
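A minimal sketch of the error-matching behaviour documented above, assuming only the proto API shown in this patch; the input is deliberately truncated to force a decode error:

	package main

	import (
		"errors"
		"fmt"

		"google.golang.org/protobuf/proto"
		"google.golang.org/protobuf/types/known/wrapperspb"
	)

	func main() {
		b, _ := proto.Marshal(wrapperspb.String("hello"))

		// Dropping the last byte leaves a length-delimited field short,
		// so Unmarshal fails with an error produced by this module.
		err := proto.Unmarshal(b[:len(b)-1], &wrapperspb.StringValue{})
		fmt.Println(errors.Is(err, proto.Error)) // true
	}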
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
index e4dfb12050..baa0cc6218 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
@@ -3,11 +3,11 @@
 // license that can be found in the LICENSE file.
 
 // Package protodesc provides functionality for converting
-// FileDescriptorProto messages to/from protoreflect.FileDescriptor values.
+// FileDescriptorProto messages to/from [protoreflect.FileDescriptor] values.
 //
 // The google.protobuf.FileDescriptorProto is a protobuf message that describes
 // the type information for a .proto file in a form that is easily serializable.
-// The protoreflect.FileDescriptor is a more structured representation of
+// The [protoreflect.FileDescriptor] is a more structured representation of
 // the FileDescriptorProto message where references and remote dependencies
 // can be directly followed.
 package protodesc
@@ -24,11 +24,11 @@ import (
 	"google.golang.org/protobuf/types/descriptorpb"
 )
 
-// Resolver is the resolver used by NewFile to resolve dependencies.
+// Resolver is the resolver used by [NewFile] to resolve dependencies.
 // The enums and messages provided must belong to some parent file,
 // which is also registered.
 //
-// It is implemented by protoregistry.Files.
+// It is implemented by [protoregistry.Files].
 type Resolver interface {
 	FindFileByPath(string) (protoreflect.FileDescriptor, error)
 	FindDescriptorByName(protoreflect.FullName) (protoreflect.Descriptor, error)
@@ -61,19 +61,19 @@ type FileOptions struct {
 	AllowUnresolvable bool
 }
 
-// NewFile creates a new protoreflect.FileDescriptor from the provided
-// file descriptor message. See FileOptions.New for more information.
+// NewFile creates a new [protoreflect.FileDescriptor] from the provided
+// file descriptor message. See [FileOptions.New] for more information.
 func NewFile(fd *descriptorpb.FileDescriptorProto, r Resolver) (protoreflect.FileDescriptor, error) {
 	return FileOptions{}.New(fd, r)
 }
 
-// NewFiles creates a new protoregistry.Files from the provided
-// FileDescriptorSet message. See FileOptions.NewFiles for more information.
+// NewFiles creates a new [protoregistry.Files] from the provided
+// FileDescriptorSet message. See [FileOptions.NewFiles] for more information.
 func NewFiles(fd *descriptorpb.FileDescriptorSet) (*protoregistry.Files, error) {
 	return FileOptions{}.NewFiles(fd)
 }
 
-// New creates a new protoreflect.FileDescriptor from the provided
+// New creates a new [protoreflect.FileDescriptor] from the provided
 // file descriptor message. The file must represent a valid proto file according
 // to protobuf semantics. The returned descriptor is a deep copy of the input.
 //
@@ -93,9 +93,15 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
 		f.L1.Syntax = protoreflect.Proto2
 	case "proto3":
 		f.L1.Syntax = protoreflect.Proto3
+	case "editions":
+		f.L1.Syntax = protoreflect.Editions
+		f.L1.Edition = fromEditionProto(fd.GetEdition())
 	default:
 		return nil, errors.New("invalid syntax: %q", fd.GetSyntax())
 	}
+	if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < SupportedEditionsMinimum || fd.GetEdition() > SupportedEditionsMaximum) {
+		return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition())
+	}
 	f.L1.Path = fd.GetName()
 	if f.L1.Path == "" {
 		return nil, errors.New("file path must be populated")
@@ -108,6 +114,9 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
 		opts = proto.Clone(opts).(*descriptorpb.FileOptions)
 		f.L2.Options = func() protoreflect.ProtoMessage { return opts }
 	}
+	if f.L1.Syntax == protoreflect.Editions {
+		initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures())
+	}
 
 	f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency()))
 	for _, i := range fd.GetPublicDependency() {
@@ -231,7 +240,7 @@ func (is importSet) importPublic(imps protoreflect.FileImports) {
 	}
 }
 
-// NewFiles creates a new protoregistry.Files from the provided
+// NewFiles creates a new [protoregistry.Files] from the provided
 // FileDescriptorSet message. The descriptor set must include only
 // valid files according to protobuf semantics. The returned descriptors
 // are a deep copy of the input.
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
index 37efda1afe..aff6fd4900 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
@@ -137,6 +137,30 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc
 		if fd.JsonName != nil {
 			f.L1.StringName.InitJSON(fd.GetJsonName())
 		}
+
+		if f.Base.L0.ParentFile.Syntax() == protoreflect.Editions {
+			f.L1.Presence = resolveFeatureHasFieldPresence(f.Base.L0.ParentFile, fd)
+			// We reuse the existing field because the old option `[packed =
+			// true]` is mutually exclusive with the editions feature.
+			if fd.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED {
+				f.L1.HasPacked = true
+				f.L1.IsPacked = resolveFeatureRepeatedFieldEncodingPacked(f.Base.L0.ParentFile, fd)
+			}
+
+			// We pretend this option is always explicitly set because the only
+			// use of HasEnforceUTF8 is to determine whether to use EnforceUTF8
+			// or to return the appropriate default.
+			// When using editions we either parse the option or resolve the
+			// appropriate default here (instead of later when this option is
+			// requested from the descriptor).
+			// In proto2/proto3 syntax HasEnforceUTF8 might be false.
+			f.L1.HasEnforceUTF8 = true
+			f.L1.EnforceUTF8 = resolveFeatureEnforceUTF8(f.Base.L0.ParentFile, fd)
+
+			if f.L1.Kind == protoreflect.MessageKind && resolveFeatureDelimitedEncoding(f.Base.L0.ParentFile, fd) {
+				f.L1.Kind = protoreflect.GroupKind
+			}
+		}
 	}
 	return fs, nil
 }
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
new file mode 100644
index 0000000000..7352926cab
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
@@ -0,0 +1,177 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protodesc
+
+import (
+	_ "embed"
+	"fmt"
+	"os"
+	"sync"
+
+	"google.golang.org/protobuf/internal/filedesc"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/descriptorpb"
+)
+
+const (
+	SupportedEditionsMinimum = descriptorpb.Edition_EDITION_PROTO2
+	SupportedEditionsMaximum = descriptorpb.Edition_EDITION_2023
+)
+
+//go:embed editions_defaults.binpb
+var binaryEditionDefaults []byte
+var defaults = &descriptorpb.FeatureSetDefaults{}
+var defaultsCacheMu sync.Mutex
+var defaultsCache = make(map[filedesc.Edition]*descriptorpb.FeatureSet)
+
+func init() {
+	err := proto.Unmarshal(binaryEditionDefaults, defaults)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "unmarshal editions defaults: %v\n", err)
+		os.Exit(1)
+	}
+}
+
+func fromEditionProto(epb descriptorpb.Edition) filedesc.Edition {
+	return filedesc.Edition(epb)
+}
+
+func toEditionProto(ed filedesc.Edition) descriptorpb.Edition {
+	switch ed {
+	case filedesc.EditionUnknown:
+		return descriptorpb.Edition_EDITION_UNKNOWN
+	case filedesc.EditionProto2:
+		return descriptorpb.Edition_EDITION_PROTO2
+	case filedesc.EditionProto3:
+		return descriptorpb.Edition_EDITION_PROTO3
+	case filedesc.Edition2023:
+		return descriptorpb.Edition_EDITION_2023
+	default:
+		panic(fmt.Sprintf("unknown value for edition: %v", ed))
+	}
+}
+
+func getFeatureSetFor(ed filedesc.Edition) *descriptorpb.FeatureSet {
+	defaultsCacheMu.Lock()
+	defer defaultsCacheMu.Unlock()
+	if def, ok := defaultsCache[ed]; ok {
+		return def
+	}
+	edpb := toEditionProto(ed)
+	if defaults.GetMinimumEdition() > edpb || defaults.GetMaximumEdition() < edpb {
+	// This should never happen: protodesc.(FileOptions).New would fail when
+		// initializing the file descriptor.
+		// This most likely means the embedded defaults were not updated.
+		fmt.Fprintf(os.Stderr, "internal error: unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb)
+		os.Exit(1)
+	}
+	fs := defaults.GetDefaults()[0].GetFeatures()
+	// Using a linear search for now.
+	// Editions are guaranteed to be sorted and thus we could use a binary search.
+	// Given that there are only a handful of editions (with one more per year)
+	// there is not much reason to use a binary search.
+	for _, def := range defaults.GetDefaults() {
+		if def.GetEdition() <= edpb {
+			fs = def.GetFeatures()
+		} else {
+			break
+		}
+	}
+	defaultsCache[ed] = fs
+	return fs
+}
+
+func resolveFeatureHasFieldPresence(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool {
+	fs := fieldDesc.GetOptions().GetFeatures()
+	if fs == nil || fs.FieldPresence == nil {
+		return fileDesc.L1.EditionFeatures.IsFieldPresence
+	}
+	return fs.GetFieldPresence() == descriptorpb.FeatureSet_LEGACY_REQUIRED ||
+		fs.GetFieldPresence() == descriptorpb.FeatureSet_EXPLICIT
+}
+
+func resolveFeatureRepeatedFieldEncodingPacked(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool {
+	fs := fieldDesc.GetOptions().GetFeatures()
+	if fs == nil || fs.RepeatedFieldEncoding == nil {
+		return fileDesc.L1.EditionFeatures.IsPacked
+	}
+	return fs.GetRepeatedFieldEncoding() == descriptorpb.FeatureSet_PACKED
+}
+
+func resolveFeatureEnforceUTF8(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool {
+	fs := fieldDesc.GetOptions().GetFeatures()
+	if fs == nil || fs.Utf8Validation == nil {
+		return fileDesc.L1.EditionFeatures.IsUTF8Validated
+	}
+	return fs.GetUtf8Validation() == descriptorpb.FeatureSet_VERIFY
+}
+
+func resolveFeatureDelimitedEncoding(fileDesc *filedesc.File, fieldDesc *descriptorpb.FieldDescriptorProto) bool {
+	fs := fieldDesc.GetOptions().GetFeatures()
+	if fs == nil || fs.MessageEncoding == nil {
+		return fileDesc.L1.EditionFeatures.IsDelimitedEncoded
+	}
+	return fs.GetMessageEncoding() == descriptorpb.FeatureSet_DELIMITED
+}
+
+// initFileDescFromFeatureSet initializes editions related fields in fd based
+// on fs. If fs is nil it is assumed to be an empty featureset and all fields
+// will be initialized with the appropriate default. fd.L1.Edition must be set
+// before calling this function.
+func initFileDescFromFeatureSet(fd *filedesc.File, fs *descriptorpb.FeatureSet) {
+	dfs := getFeatureSetFor(fd.L1.Edition)
+	if fs == nil {
+		fs = &descriptorpb.FeatureSet{}
+	}
+
+	var fieldPresence descriptorpb.FeatureSet_FieldPresence
+	if fp := fs.FieldPresence; fp != nil {
+		fieldPresence = *fp
+	} else {
+		fieldPresence = *dfs.FieldPresence
+	}
+	fd.L1.EditionFeatures.IsFieldPresence = fieldPresence == descriptorpb.FeatureSet_LEGACY_REQUIRED ||
+		fieldPresence == descriptorpb.FeatureSet_EXPLICIT
+
+	var enumType descriptorpb.FeatureSet_EnumType
+	if et := fs.EnumType; et != nil {
+		enumType = *et
+	} else {
+		enumType = *dfs.EnumType
+	}
+	fd.L1.EditionFeatures.IsOpenEnum = enumType == descriptorpb.FeatureSet_OPEN
+
+	var repeatedFieldEncoding descriptorpb.FeatureSet_RepeatedFieldEncoding
+	if rfe := fs.RepeatedFieldEncoding; rfe != nil {
+		repeatedFieldEncoding = *rfe
+	} else {
+		repeatedFieldEncoding = *dfs.RepeatedFieldEncoding
+	}
+	fd.L1.EditionFeatures.IsPacked = repeatedFieldEncoding == descriptorpb.FeatureSet_PACKED
+
+	var isUTF8Validated descriptorpb.FeatureSet_Utf8Validation
+	if utf8val := fs.Utf8Validation; utf8val != nil {
+		isUTF8Validated = *utf8val
+	} else {
+		isUTF8Validated = *dfs.Utf8Validation
+	}
+	fd.L1.EditionFeatures.IsUTF8Validated = isUTF8Validated == descriptorpb.FeatureSet_VERIFY
+
+	var messageEncoding descriptorpb.FeatureSet_MessageEncoding
+	if me := fs.MessageEncoding; me != nil {
+		messageEncoding = *me
+	} else {
+		messageEncoding = *dfs.MessageEncoding
+	}
+	fd.L1.EditionFeatures.IsDelimitedEncoded = messageEncoding == descriptorpb.FeatureSet_DELIMITED
+
+	var jsonFormat descriptorpb.FeatureSet_JsonFormat
+	if jf := fs.JsonFormat; jf != nil {
+		jsonFormat = *jf
+	} else {
+		jsonFormat = *dfs.JsonFormat
+	}
+	fd.L1.EditionFeatures.IsJSONCompliant = jsonFormat == descriptorpb.FeatureSet_ALLOW
+}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb b/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb
new file mode 100644
index 0000000000..1a8610a843
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions_defaults.binpb
@@ -0,0 +1,4 @@
+
+ (0�
+ (0�
+ (0� �(�
\ No newline at end of file
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
index a7c5ceffc9..9d6e05420f 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
@@ -16,7 +16,7 @@ import (
 	"google.golang.org/protobuf/types/descriptorpb"
 )
 
-// ToFileDescriptorProto copies a protoreflect.FileDescriptor into a
+// ToFileDescriptorProto copies a [protoreflect.FileDescriptor] into a
 // google.protobuf.FileDescriptorProto message.
 func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto {
 	p := &descriptorpb.FileDescriptorProto{
@@ -70,13 +70,13 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD
 	for i, exts := 0, file.Extensions(); i < exts.Len(); i++ {
 		p.Extension = append(p.Extension, ToFieldDescriptorProto(exts.Get(i)))
 	}
-	if syntax := file.Syntax(); syntax != protoreflect.Proto2 {
+	if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() {
 		p.Syntax = proto.String(file.Syntax().String())
 	}
 	return p
 }
 
-// ToDescriptorProto copies a protoreflect.MessageDescriptor into a
+// ToDescriptorProto copies a [protoreflect.MessageDescriptor] into a
 // google.protobuf.DescriptorProto message.
 func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto {
 	p := &descriptorpb.DescriptorProto{
@@ -119,7 +119,7 @@ func ToDescriptorProto(message protoreflect.MessageDescriptor) *descriptorpb.Des
 	return p
 }
 
-// ToFieldDescriptorProto copies a protoreflect.FieldDescriptor into a
+// ToFieldDescriptorProto copies a [protoreflect.FieldDescriptor] into a
 // google.protobuf.FieldDescriptorProto message.
 func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto {
 	p := &descriptorpb.FieldDescriptorProto{
@@ -168,7 +168,7 @@ func ToFieldDescriptorProto(field protoreflect.FieldDescriptor) *descriptorpb.Fi
 	return p
 }
 
-// ToOneofDescriptorProto copies a protoreflect.OneofDescriptor into a
+// ToOneofDescriptorProto copies a [protoreflect.OneofDescriptor] into a
 // google.protobuf.OneofDescriptorProto message.
 func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto {
 	return &descriptorpb.OneofDescriptorProto{
@@ -177,7 +177,7 @@ func ToOneofDescriptorProto(oneof protoreflect.OneofDescriptor) *descriptorpb.On
 	}
 }
 
-// ToEnumDescriptorProto copies a protoreflect.EnumDescriptor into a
+// ToEnumDescriptorProto copies a [protoreflect.EnumDescriptor] into a
 // google.protobuf.EnumDescriptorProto message.
 func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto {
 	p := &descriptorpb.EnumDescriptorProto{
@@ -200,7 +200,7 @@ func ToEnumDescriptorProto(enum protoreflect.EnumDescriptor) *descriptorpb.EnumD
 	return p
 }
 
-// ToEnumValueDescriptorProto copies a protoreflect.EnumValueDescriptor into a
+// ToEnumValueDescriptorProto copies a [protoreflect.EnumValueDescriptor] into a
 // google.protobuf.EnumValueDescriptorProto message.
 func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto {
 	return &descriptorpb.EnumValueDescriptorProto{
@@ -210,7 +210,7 @@ func ToEnumValueDescriptorProto(value protoreflect.EnumValueDescriptor) *descrip
 	}
 }
 
-// ToServiceDescriptorProto copies a protoreflect.ServiceDescriptor into a
+// ToServiceDescriptorProto copies a [protoreflect.ServiceDescriptor] into a
 // google.protobuf.ServiceDescriptorProto message.
 func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto {
 	p := &descriptorpb.ServiceDescriptorProto{
@@ -223,7 +223,7 @@ func ToServiceDescriptorProto(service protoreflect.ServiceDescriptor) *descripto
 	return p
 }
 
-// ToMethodDescriptorProto copies a protoreflect.MethodDescriptor into a
+// ToMethodDescriptorProto copies a [protoreflect.MethodDescriptor] into a
 // google.protobuf.MethodDescriptorProto message.
 func ToMethodDescriptorProto(method protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto {
 	p := &descriptorpb.MethodDescriptorProto{
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
index 55aa14922b..ec6572dfda 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
@@ -10,46 +10,46 @@
 //
 // # Protocol Buffer Descriptors
 //
-// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor)
+// Protobuf descriptors (e.g., [EnumDescriptor] or [MessageDescriptor])
 // are immutable objects that represent protobuf type information.
 // They are wrappers around the messages declared in descriptor.proto.
 // Protobuf descriptors alone lack any information regarding Go types.
 //
-// Enums and messages generated by this module implement Enum and ProtoMessage,
+// Enums and messages generated by this module implement [Enum] and [ProtoMessage],
 // where the Descriptor and ProtoReflect.Descriptor accessors respectively
 // return the protobuf descriptor for the values.
 //
 // The protobuf descriptor interfaces are not meant to be implemented by
 // user code since they might need to be extended in the future to support
 // additions to the protobuf language.
-// The "google.golang.org/protobuf/reflect/protodesc" package converts between
+// The [google.golang.org/protobuf/reflect/protodesc] package converts between
 // google.protobuf.DescriptorProto messages and protobuf descriptors.
 //
 // # Go Type Descriptors
 //
-// A type descriptor (e.g., EnumType or MessageType) is a constructor for
+// A type descriptor (e.g., [EnumType] or [MessageType]) is a constructor for
 // a concrete Go type that represents the associated protobuf descriptor.
 // There is commonly a one-to-one relationship between protobuf descriptors and
 // Go type descriptors, but it can potentially be a one-to-many relationship.
 //
-// Enums and messages generated by this module implement Enum and ProtoMessage,
+// Enums and messages generated by this module implement [Enum] and [ProtoMessage],
 // where the Type and ProtoReflect.Type accessors respectively
 // return the protobuf descriptor for the values.
 //
-// The "google.golang.org/protobuf/types/dynamicpb" package can be used to
+// The [google.golang.org/protobuf/types/dynamicpb] package can be used to
 // create Go type descriptors from protobuf descriptors.
 //
 // # Value Interfaces
 //
-// The Enum and Message interfaces provide a reflective view over an
+// The [Enum] and [Message] interfaces provide a reflective view over an
 // enum or message instance. For enums, it provides the ability to retrieve
 // the enum value number for any concrete enum type. For messages, it provides
 // the ability to access or manipulate fields of the message.
 //
-// To convert a proto.Message to a protoreflect.Message, use the
+// To convert a [google.golang.org/protobuf/proto.Message] to a [protoreflect.Message], use the
 // former's ProtoReflect method. Since the ProtoReflect method is new to the
 // v2 message interface, it may not be present on older message implementations.
-// The "github.com/golang/protobuf/proto".MessageReflect function can be used
+// The [github.com/golang/protobuf/proto.MessageReflect] function can be used
 // to obtain a reflective view on older messages.
 //
 // # Relationships
@@ -71,12 +71,12 @@
 //	      │                                 │
 //	      └────────────────── Type() ───────┘
 //
-// • An EnumType describes a concrete Go enum type.
+// • An [EnumType] describes a concrete Go enum type.
 // It has an EnumDescriptor and can construct an Enum instance.
 //
-// • An EnumDescriptor describes an abstract protobuf enum type.
+// • An [EnumDescriptor] describes an abstract protobuf enum type.
 //
-// • An Enum is a concrete enum instance. Generated enums implement Enum.
+// • An [Enum] is a concrete enum instance. Generated enums implement Enum.
 //
 //	  ┌──────────────── New() ─────────────────┐
 //	  │                                        │
@@ -90,24 +90,26 @@
 //	       │                                    │
 //	       └─────────────────── Type() ─────────┘
 //
-// • A MessageType describes a concrete Go message type.
-// It has a MessageDescriptor and can construct a Message instance.
-// Just as how Go's reflect.Type is a reflective description of a Go type,
-// a MessageType is a reflective description of a Go type for a protobuf message.
+// • A [MessageType] describes a concrete Go message type.
+// It has a [MessageDescriptor] and can construct a [Message] instance.
+// Just as how Go's [reflect.Type] is a reflective description of a Go type,
+// a [MessageType] is a reflective description of a Go type for a protobuf message.
 //
-// • A MessageDescriptor describes an abstract protobuf message type.
-// It has no understanding of Go types. In order to construct a MessageType
-// from just a MessageDescriptor, you can consider looking up the message type
-// in the global registry using protoregistry.GlobalTypes.FindMessageByName
-// or constructing a dynamic MessageType using dynamicpb.NewMessageType.
+// • A [MessageDescriptor] describes an abstract protobuf message type.
+// It has no understanding of Go types. In order to construct a [MessageType]
+// from just a [MessageDescriptor], you can consider looking up the message type
+// in the global registry using the FindMessageByName method on
+// [google.golang.org/protobuf/reflect/protoregistry.GlobalTypes]
+// or constructing a dynamic [MessageType] using
+// [google.golang.org/protobuf/types/dynamicpb.NewMessageType].
 //
-// • A Message is a reflective view over a concrete message instance.
-// Generated messages implement ProtoMessage, which can convert to a Message.
-// Just as how Go's reflect.Value is a reflective view over a Go value,
-// a Message is a reflective view over a concrete protobuf message instance.
-// Using Go reflection as an analogy, the ProtoReflect method is similar to
-// calling reflect.ValueOf, and the Message.Interface method is similar to
-// calling reflect.Value.Interface.
+// • A [Message] is a reflective view over a concrete message instance.
+// Generated messages implement [ProtoMessage], which can convert to a [Message].
+// Just as how Go's [reflect.Value] is a reflective view over a Go value,
+// a [Message] is a reflective view over a concrete protobuf message instance.
+// Using Go reflection as an analogy, the [ProtoMessage.ProtoReflect] method is similar to
+// calling [reflect.ValueOf], and the [Message.Interface] method is similar to
+// calling [reflect.Value.Interface].
 //
 //	      ┌── TypeDescriptor() ──┐    ┌───── Descriptor() ─────┐
 //	      │                      V    │                        V
@@ -119,15 +121,15 @@
 //	                                 │                          │
 //	                                 └────── implements ────────┘
 //
-// • An ExtensionType describes a concrete Go implementation of an extension.
-// It has an ExtensionTypeDescriptor and can convert to/from
-// abstract Values and Go values.
+// • An [ExtensionType] describes a concrete Go implementation of an extension.
+// It has an [ExtensionTypeDescriptor] and can convert to/from
+// an abstract [Value] and a Go value.
 //
-// • An ExtensionTypeDescriptor is an ExtensionDescriptor
-// which also has an ExtensionType.
+// • An [ExtensionTypeDescriptor] is an [ExtensionDescriptor]
+// which also has an [ExtensionType].
 //
-// • An ExtensionDescriptor describes an abstract protobuf extension field and
-// may not always be an ExtensionTypeDescriptor.
+// • An [ExtensionDescriptor] describes an abstract protobuf extension field and
+// may not always be an [ExtensionTypeDescriptor].
 package protoreflect
 
 import (
@@ -142,7 +144,7 @@ type doNotImplement pragma.DoNotImplement
 
 // ProtoMessage is the top-level interface that all proto messages implement.
 // This is declared in the protoreflect package to avoid a cyclic dependency;
-// use the proto.Message type instead, which aliases this type.
+// use the [google.golang.org/protobuf/proto.Message] type instead, which aliases this type.
 type ProtoMessage interface{ ProtoReflect() Message }
 
 // Syntax is the language version of the proto file.
@@ -151,8 +153,9 @@ type Syntax syntax
 type syntax int8 // keep exact type opaque as the int type may change
 
 const (
-	Proto2 Syntax = 2
-	Proto3 Syntax = 3
+	Proto2   Syntax = 2
+	Proto3   Syntax = 3
+	Editions Syntax = 4
 )
 
 // IsValid reports whether the syntax is valid.
@@ -436,7 +439,7 @@ type Names interface {
 // FullName is a qualified name that uniquely identifies a proto declaration.
 // A qualified name is the concatenation of the proto package along with the
 // fully-declared name (i.e., name of parent preceding the name of the child),
-// with a '.' delimiter placed between each Name.
+// with a '.' delimiter placed between each [Name].
 //
 // This should not have any leading or trailing dots.
 type FullName string // e.g., "google.protobuf.Field.Kind"
@@ -480,7 +483,7 @@ func isLetterDigit(c byte) bool {
 }
 
 // Name returns the short name, which is the last identifier segment.
-// A single segment FullName is the Name itself.
+// A single segment FullName is the [Name] itself.
 func (n FullName) Name() Name {
 	if i := strings.LastIndexByte(string(n), '.'); i >= 0 {
 		return Name(n[i+1:])
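A minimal sketch of moving from a concrete message to its reflective view and descriptor, as the package documentation above describes; wrapperspb is used only to have a ready-made generated message:

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/types/known/wrapperspb"
	)

	func main() {
		msg := wrapperspb.String("hello")

		m := msg.ProtoReflect() // proto.Message -> protoreflect.Message
		md := m.Descriptor()    // its MessageDescriptor

		fmt.Println(md.FullName())             // google.protobuf.StringValue
		fmt.Println(md.Fields().Len())         // 1
		fmt.Println(md.Fields().Get(0).Name()) // value
	}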
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
index 717b106f3d..0c045db6ab 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
@@ -35,7 +35,7 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte {
 		b = p.appendSingularField(b, "source_code_info", (*SourcePath).appendSourceCodeInfo)
 	case 12:
 		b = p.appendSingularField(b, "syntax", nil)
-	case 13:
+	case 14:
 		b = p.appendSingularField(b, "edition", nil)
 	}
 	return b
@@ -180,6 +180,8 @@ func (p *SourcePath) appendFileOptions(b []byte) []byte {
 		b = p.appendSingularField(b, "php_metadata_namespace", nil)
 	case 45:
 		b = p.appendSingularField(b, "ruby_package", nil)
+	case 50:
+		b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
 	case 999:
 		b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
 	}
@@ -240,6 +242,8 @@ func (p *SourcePath) appendMessageOptions(b []byte) []byte {
 		b = p.appendSingularField(b, "map_entry", nil)
 	case 11:
 		b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil)
+	case 12:
+		b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
 	case 999:
 		b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
 	}
@@ -285,6 +289,8 @@ func (p *SourcePath) appendEnumOptions(b []byte) []byte {
 		b = p.appendSingularField(b, "deprecated", nil)
 	case 6:
 		b = p.appendSingularField(b, "deprecated_legacy_json_field_conflicts", nil)
+	case 7:
+		b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
 	case 999:
 		b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
 	}
@@ -330,6 +336,8 @@ func (p *SourcePath) appendServiceOptions(b []byte) []byte {
 		return b
 	}
 	switch (*p)[0] {
+	case 34:
+		b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
 	case 33:
 		b = p.appendSingularField(b, "deprecated", nil)
 	case 999:
@@ -361,16 +369,39 @@ func (p *SourcePath) appendFieldOptions(b []byte) []byte {
 		b = p.appendSingularField(b, "debug_redact", nil)
 	case 17:
 		b = p.appendSingularField(b, "retention", nil)
-	case 18:
-		b = p.appendSingularField(b, "target", nil)
 	case 19:
 		b = p.appendRepeatedField(b, "targets", nil)
+	case 20:
+		b = p.appendRepeatedField(b, "edition_defaults", (*SourcePath).appendFieldOptions_EditionDefault)
+	case 21:
+		b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
 	case 999:
 		b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
 	}
 	return b
 }
 
+func (p *SourcePath) appendFeatureSet(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 1:
+		b = p.appendSingularField(b, "field_presence", nil)
+	case 2:
+		b = p.appendSingularField(b, "enum_type", nil)
+	case 3:
+		b = p.appendSingularField(b, "repeated_field_encoding", nil)
+	case 4:
+		b = p.appendSingularField(b, "utf8_validation", nil)
+	case 5:
+		b = p.appendSingularField(b, "message_encoding", nil)
+	case 6:
+		b = p.appendSingularField(b, "json_format", nil)
+	}
+	return b
+}
+
 func (p *SourcePath) appendUninterpretedOption(b []byte) []byte {
 	if len(*p) == 0 {
 		return b
@@ -422,6 +453,8 @@ func (p *SourcePath) appendExtensionRangeOptions(b []byte) []byte {
 		b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
 	case 2:
 		b = p.appendRepeatedField(b, "declaration", (*SourcePath).appendExtensionRangeOptions_Declaration)
+	case 50:
+		b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
 	case 3:
 		b = p.appendSingularField(b, "verification", nil)
 	}
@@ -433,6 +466,8 @@ func (p *SourcePath) appendOneofOptions(b []byte) []byte {
 		return b
 	}
 	switch (*p)[0] {
+	case 1:
+		b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
 	case 999:
 		b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
 	}
@@ -446,6 +481,10 @@ func (p *SourcePath) appendEnumValueOptions(b []byte) []byte {
 	switch (*p)[0] {
 	case 1:
 		b = p.appendSingularField(b, "deprecated", nil)
+	case 2:
+		b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
+	case 3:
+		b = p.appendSingularField(b, "debug_redact", nil)
 	case 999:
 		b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
 	}
@@ -461,12 +500,27 @@ func (p *SourcePath) appendMethodOptions(b []byte) []byte {
 		b = p.appendSingularField(b, "deprecated", nil)
 	case 34:
 		b = p.appendSingularField(b, "idempotency_level", nil)
+	case 35:
+		b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
 	case 999:
 		b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
 	}
 	return b
 }
 
+func (p *SourcePath) appendFieldOptions_EditionDefault(b []byte) []byte {
+	if len(*p) == 0 {
+		return b
+	}
+	switch (*p)[0] {
+	case 3:
+		b = p.appendSingularField(b, "edition", nil)
+	case 2:
+		b = p.appendSingularField(b, "value", nil)
+	}
+	return b
+}
+
 func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte {
 	if len(*p) == 0 {
 		return b
@@ -491,8 +545,6 @@ func (p *SourcePath) appendExtensionRangeOptions_Declaration(b []byte) []byte {
 		b = p.appendSingularField(b, "full_name", nil)
 	case 3:
 		b = p.appendSingularField(b, "type", nil)
-	case 4:
-		b = p.appendSingularField(b, "is_repeated", nil)
 	case 5:
 		b = p.appendSingularField(b, "reserved", nil)
 	case 6:
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
index 3867470d30..60ff62b4c8 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
@@ -12,7 +12,7 @@ package protoreflect
 // exactly identical. However, it is possible for the same semantically
 // identical proto type to be represented by multiple type descriptors.
 //
-// For example, suppose we have t1 and t2 which are both MessageDescriptors.
+// For example, suppose we have t1 and t2 which are both a [MessageDescriptor].
 // If t1 == t2, then the types are definitely equal and all accessors return
 // the same information. However, if t1 != t2, then it is still possible that
 // they still represent the same proto type (e.g., t1.FullName == t2.FullName).
@@ -115,7 +115,7 @@ type Descriptor interface {
 // corresponds with the google.protobuf.FileDescriptorProto message.
 //
 // Top-level declarations:
-// EnumDescriptor, MessageDescriptor, FieldDescriptor, and/or ServiceDescriptor.
+// [EnumDescriptor], [MessageDescriptor], [FieldDescriptor], and/or [ServiceDescriptor].
 type FileDescriptor interface {
 	Descriptor // Descriptor.FullName is identical to Package
 
@@ -180,8 +180,8 @@ type FileImport struct {
 // corresponds with the google.protobuf.DescriptorProto message.
 //
 // Nested declarations:
-// FieldDescriptor, OneofDescriptor, FieldDescriptor, EnumDescriptor,
-// and/or MessageDescriptor.
+// [FieldDescriptor], [OneofDescriptor], [FieldDescriptor], [EnumDescriptor],
+// and/or [MessageDescriptor].
 type MessageDescriptor interface {
 	Descriptor
 
@@ -214,7 +214,7 @@ type MessageDescriptor interface {
 	ExtensionRanges() FieldRanges
 	// ExtensionRangeOptions returns the ith extension range options.
 	//
-	// To avoid a dependency cycle, this method returns a proto.Message value,
+	// To avoid a dependency cycle, this method returns a [google.golang.org/protobuf/proto.Message] value,
 	// which always contains a google.protobuf.ExtensionRangeOptions message.
 	// This method returns a typed nil-pointer if no options are present.
 	// The caller must import the descriptorpb package to use this.
@@ -231,9 +231,9 @@ type MessageDescriptor interface {
 }
 type isMessageDescriptor interface{ ProtoType(MessageDescriptor) }
 
-// MessageType encapsulates a MessageDescriptor with a concrete Go implementation.
+// MessageType encapsulates a [MessageDescriptor] with a concrete Go implementation.
 // It is recommended that implementations of this interface also implement the
-// MessageFieldTypes interface.
+// [MessageFieldTypes] interface.
 type MessageType interface {
 	// New returns a newly allocated empty message.
 	// It may return nil for synthetic messages representing a map entry.
@@ -249,19 +249,19 @@ type MessageType interface {
 	Descriptor() MessageDescriptor
 }
 
-// MessageFieldTypes extends a MessageType by providing type information
+// MessageFieldTypes extends a [MessageType] by providing type information
 // regarding enums and messages referenced by the message fields.
 type MessageFieldTypes interface {
 	MessageType
 
-	// Enum returns the EnumType for the ith field in Descriptor.Fields.
+	// Enum returns the EnumType for the ith field in MessageDescriptor.Fields.
 	// It returns nil if the ith field is not an enum kind.
 	// It panics if out of bounds.
 	//
 	// Invariant: mt.Enum(i).Descriptor() == mt.Descriptor().Fields(i).Enum()
 	Enum(i int) EnumType
 
-	// Message returns the MessageType for the ith field in Descriptor.Fields.
+	// Message returns the MessageType for the ith field in MessageDescriptor.Fields.
 	// It returns nil if the ith field is not a message or group kind.
 	// It panics if out of bounds.
 	//
@@ -286,8 +286,8 @@ type MessageDescriptors interface {
 // corresponds with the google.protobuf.FieldDescriptorProto message.
 //
 // It is used for both normal fields defined within the parent message
-// (e.g., MessageDescriptor.Fields) and fields that extend some remote message
-// (e.g., FileDescriptor.Extensions or MessageDescriptor.Extensions).
+// (e.g., [MessageDescriptor.Fields]) and fields that extend some remote message
+// (e.g., [FileDescriptor.Extensions] or [MessageDescriptor.Extensions]).
 type FieldDescriptor interface {
 	Descriptor
 
@@ -344,7 +344,7 @@ type FieldDescriptor interface {
 	// IsMap reports whether this field represents a map,
 	// where the value type for the associated field is a Map.
 	// It is equivalent to checking whether Cardinality is Repeated,
-	// that the Kind is MessageKind, and that Message.IsMapEntry reports true.
+	// that the Kind is MessageKind, and that MessageDescriptor.IsMapEntry reports true.
 	IsMap() bool
 
 	// MapKey returns the field descriptor for the key in the map entry.
@@ -419,7 +419,7 @@ type OneofDescriptor interface {
 
 	// IsSynthetic reports whether this is a synthetic oneof created to support
 	// proto3 optional semantics. If true, Fields contains exactly one field
-	// with HasOptionalKeyword specified.
+	// with FieldDescriptor.HasOptionalKeyword specified.
 	IsSynthetic() bool
 
 	// Fields is a list of fields belonging to this oneof.
@@ -442,10 +442,10 @@ type OneofDescriptors interface {
 	doNotImplement
 }
 
-// ExtensionDescriptor is an alias of FieldDescriptor for documentation.
+// ExtensionDescriptor is an alias of [FieldDescriptor] for documentation.
 type ExtensionDescriptor = FieldDescriptor
 
-// ExtensionTypeDescriptor is an ExtensionDescriptor with an associated ExtensionType.
+// ExtensionTypeDescriptor is an [ExtensionDescriptor] with an associated [ExtensionType].
 type ExtensionTypeDescriptor interface {
 	ExtensionDescriptor
 
@@ -470,12 +470,12 @@ type ExtensionDescriptors interface {
 	doNotImplement
 }
 
-// ExtensionType encapsulates an ExtensionDescriptor with a concrete
+// ExtensionType encapsulates an [ExtensionDescriptor] with a concrete
 // Go implementation. The nested field descriptor must be for an extension field.
 //
 // While a normal field is a member of the parent message that it is declared
-// within (see Descriptor.Parent), an extension field is a member of some other
-// target message (see ExtensionDescriptor.Extendee) and may have no
+// within (see [Descriptor.Parent]), an extension field is a member of some other
+// target message (see [FieldDescriptor.ContainingMessage]) and may have no
 // relationship with the parent. However, the full name of an extension field is
 // relative to the parent that it is declared within.
 //
@@ -532,7 +532,7 @@ type ExtensionType interface {
 // corresponds with the google.protobuf.EnumDescriptorProto message.
 //
 // Nested declarations:
-// EnumValueDescriptor.
+// [EnumValueDescriptor].
 type EnumDescriptor interface {
 	Descriptor
 
@@ -548,7 +548,7 @@ type EnumDescriptor interface {
 }
 type isEnumDescriptor interface{ ProtoType(EnumDescriptor) }
 
-// EnumType encapsulates an EnumDescriptor with a concrete Go implementation.
+// EnumType encapsulates an [EnumDescriptor] with a concrete Go implementation.
 type EnumType interface {
 	// New returns an instance of this enum type with its value set to n.
 	New(n EnumNumber) Enum
@@ -610,7 +610,7 @@ type EnumValueDescriptors interface {
 // ServiceDescriptor describes a service and
 // corresponds with the google.protobuf.ServiceDescriptorProto message.
 //
-// Nested declarations: MethodDescriptor.
+// Nested declarations: [MethodDescriptor].
 type ServiceDescriptor interface {
 	Descriptor
 
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
index 37601b7819..a7b0d06ff3 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
@@ -27,16 +27,16 @@ type Enum interface {
 // Message is a reflective interface for a concrete message value,
 // encapsulating both type and value information for the message.
 //
-// Accessor/mutators for individual fields are keyed by FieldDescriptor.
+// Accessor/mutators for individual fields are keyed by [FieldDescriptor].
 // For non-extension fields, the descriptor must exactly match the
 // field known by the parent message.
-// For extension fields, the descriptor must implement ExtensionTypeDescriptor,
-// extend the parent message (i.e., have the same message FullName), and
+// For extension fields, the descriptor must implement [ExtensionTypeDescriptor],
+// extend the parent message (i.e., have the same message [FullName]), and
 // be within the parent's extension range.
 //
-// Each field Value can be a scalar or a composite type (Message, List, or Map).
-// See Value for the Go types associated with a FieldDescriptor.
-// Providing a Value that is invalid or of an incorrect type panics.
+// Each field [Value] can be a scalar or a composite type ([Message], [List], or [Map]).
+// See [Value] for the Go types associated with a [FieldDescriptor].
+// Providing a [Value] that is invalid or of an incorrect type panics.
 type Message interface {
 	// Descriptor returns message descriptor, which contains only the protobuf
 	// type information for the message.
@@ -152,7 +152,7 @@ type Message interface {
 	// This method may return nil.
 	//
 	// The returned methods type is identical to
-	// "google.golang.org/protobuf/runtime/protoiface".Methods.
+	// google.golang.org/protobuf/runtime/protoiface.Methods.
 	// Consult the protoiface package documentation for details.
 	ProtoMethods() *methods
 }
@@ -175,8 +175,8 @@ func (b RawFields) IsValid() bool {
 }
 
 // List is a zero-indexed, ordered list.
-// The element Value type is determined by FieldDescriptor.Kind.
-// Providing a Value that is invalid or of an incorrect type panics.
+// The element [Value] type is determined by [FieldDescriptor.Kind].
+// Providing a [Value] that is invalid or of an incorrect type panics.
 type List interface {
 	// Len reports the number of entries in the List.
 	// Get, Set, and Truncate panic with out of bound indexes.
@@ -226,9 +226,9 @@ type List interface {
 }
 
 // Map is an unordered, associative map.
-// The entry MapKey type is determined by FieldDescriptor.MapKey.Kind.
-// The entry Value type is determined by FieldDescriptor.MapValue.Kind.
-// Providing a MapKey or Value that is invalid or of an incorrect type panics.
+// The entry [MapKey] type is determined by [FieldDescriptor.MapKey].Kind.
+// The entry [Value] type is determined by [FieldDescriptor.MapValue].Kind.
+// Providing a [MapKey] or [Value] that is invalid or of an incorrect type panics.
 type Map interface {
 	// Len reports the number of elements in the map.
 	Len() int
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go
index 591652541f..654599d449 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_equal.go
@@ -24,19 +24,19 @@ import (
 //     Unlike the == operator, a NaN is equal to another NaN.
 //
 //   - Enums are equal if they contain the same number.
-//     Since Value does not contain an enum descriptor,
+//     Since [Value] does not contain an enum descriptor,
 //     enum values do not consider the type of the enum.
 //
 //   - Other scalar values are equal if they contain the same value.
 //
-//   - Message values are equal if they belong to the same message descriptor,
+//   - [Message] values are equal if they belong to the same message descriptor,
 //     have the same set of populated known and extension field values,
 //     and the same set of unknown fields values.
 //
-//   - Lists are equal if they are the same length and
+//   - [List] values are equal if they are the same length and
 //     each corresponding element is equal.
 //
-//   - Maps are equal if they have the same set of keys and
+//   - [Map] values are equal if they have the same set of keys and
 //     the corresponding value for each key is equal.
 func (v1 Value) Equal(v2 Value) bool {
 	return equalValue(v1, v2)
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
index 08e5ef73fc..1603097311 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
@@ -11,7 +11,7 @@ import (
 
 // Value is a union where only one Go type may be set at a time.
 // The Value is used to represent all possible values a field may take.
-// The following shows which Go type is used to represent each proto Kind:
+// The following shows which Go type is used to represent each proto [Kind]:
 //
 //	╔════════════╤═════════════════════════════════════╗
 //	║ Go type    │ Protobuf kind                       ║
@@ -31,22 +31,22 @@ import (
 //
 // Multiple protobuf Kinds may be represented by a single Go type if the type
 // can losslessly represent the information for the proto kind. For example,
-// Int64Kind, Sint64Kind, and Sfixed64Kind are all represented by int64,
+// [Int64Kind], [Sint64Kind], and [Sfixed64Kind] are all represented by int64,
 // but use different integer encoding methods.
 //
-// The List or Map types are used if the field cardinality is repeated.
-// A field is a List if FieldDescriptor.IsList reports true.
-// A field is a Map if FieldDescriptor.IsMap reports true.
+// The [List] or [Map] types are used if the field cardinality is repeated.
+// A field is a [List] if [FieldDescriptor.IsList] reports true.
+// A field is a [Map] if [FieldDescriptor.IsMap] reports true.
 //
 // Converting to/from a Value and a concrete Go value panics on type mismatch.
-// For example, ValueOf("hello").Int() panics because this attempts to
+// For example, [ValueOf]("hello").Int() panics because this attempts to
 // retrieve an int64 from a string.
 //
-// List, Map, and Message Values are called "composite" values.
+// [List], [Map], and [Message] Values are called "composite" values.
 //
 // A composite Value may alias (reference) memory at some location,
 // such that changes to the Value update that location.
-// A composite value acquired with a Mutable method, such as Message.Mutable,
+// A composite value acquired with a Mutable method, such as [Message.Mutable],
 // always references the source object.
 //
 // For example:
@@ -65,7 +65,7 @@ import (
 //	// appending to the List here may or may not modify the message.
 //	list.Append(protoreflect.ValueOfInt32(0))
 //
-// Some operations, such as Message.Get, may return an "empty, read-only"
+// Some operations, such as [Message.Get], may return an "empty, read-only"
 // composite Value. Modifying an empty, read-only value panics.
 type Value value
 
@@ -306,7 +306,7 @@ func (v Value) Float() float64 {
 	}
 }
 
-// String returns v as a string. Since this method implements fmt.Stringer,
+// String returns v as a string. Since this method implements [fmt.Stringer],
 // this returns the formatted string value for any non-string type.
 func (v Value) String() string {
 	switch v.typ {
@@ -327,7 +327,7 @@ func (v Value) Bytes() []byte {
 	}
 }
 
-// Enum returns v as a EnumNumber and panics if the type is not a EnumNumber.
+// Enum returns v as an [EnumNumber] and panics if the type is not an [EnumNumber].
 func (v Value) Enum() EnumNumber {
 	switch v.typ {
 	case enumType:
@@ -337,7 +337,7 @@ func (v Value) Enum() EnumNumber {
 	}
 }
 
-// Message returns v as a Message and panics if the type is not a Message.
+// Message returns v as a [Message] and panics if the type is not a [Message].
 func (v Value) Message() Message {
 	switch vi := v.getIface().(type) {
 	case Message:
@@ -347,7 +347,7 @@ func (v Value) Message() Message {
 	}
 }
 
-// List returns v as a List and panics if the type is not a List.
+// List returns v as a [List] and panics if the type is not a [List].
 func (v Value) List() List {
 	switch vi := v.getIface().(type) {
 	case List:
@@ -357,7 +357,7 @@ func (v Value) List() List {
 	}
 }
 
-// Map returns v as a Map and panics if the type is not a Map.
+// Map returns v as a [Map] and panics if the type is not a [Map].
 func (v Value) Map() Map {
 	switch vi := v.getIface().(type) {
 	case Map:
@@ -367,7 +367,7 @@ func (v Value) Map() Map {
 	}
 }
 
-// MapKey returns v as a MapKey and panics for invalid MapKey types.
+// MapKey returns v as a [MapKey] and panics for invalid [MapKey] types.
 func (v Value) MapKey() MapKey {
 	switch v.typ {
 	case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType:
@@ -378,8 +378,8 @@ func (v Value) MapKey() MapKey {
 }
 
 // MapKey is used to index maps, where the Go type of the MapKey must match
-// the specified key Kind (see MessageDescriptor.IsMapEntry).
-// The following shows what Go type is used to represent each proto Kind:
+// the specified key [Kind] (see [MessageDescriptor.IsMapEntry]).
+// The following shows what Go type is used to represent each proto [Kind]:
 //
 //	╔═════════╤═════════════════════════════════════╗
 //	║ Go type │ Protobuf kind                       ║
@@ -392,13 +392,13 @@ func (v Value) MapKey() MapKey {
 //	║ string  │ StringKind                          ║
 //	╚═════════╧═════════════════════════════════════╝
 //
-// A MapKey is constructed and accessed through a Value:
+// A MapKey is constructed and accessed through a [Value]:
 //
 //	k := ValueOf("hash").MapKey() // convert string to MapKey
 //	s := k.String()               // convert MapKey to string
 //
-// The MapKey is a strict subset of valid types used in Value;
-// converting a Value to a MapKey with an invalid type panics.
+// The MapKey is a strict subset of valid types used in [Value];
+// converting a [Value] to a MapKey with an invalid type panics.
 type MapKey value
 
 // IsValid reports whether k is populated with a value.
@@ -426,13 +426,13 @@ func (k MapKey) Uint() uint64 {
 	return Value(k).Uint()
 }
 
-// String returns k as a string. Since this method implements fmt.Stringer,
+// String returns k as a string. Since this method implements [fmt.Stringer],
 // this returns the formatted string value for any non-string type.
 func (k MapKey) String() string {
 	return Value(k).String()
 }
 
-// Value returns k as a Value.
+// Value returns k as a [Value].
 func (k MapKey) Value() Value {
 	return Value(k)
 }
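A minimal sketch of the Value and MapKey conversions described above; only constructors and accessors named in this package's documentation are used:

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/reflect/protoreflect"
	)

	func main() {
		v := protoreflect.ValueOfInt32(42)
		fmt.Println(v.Int()) // 42

		// A MapKey is constructed and accessed through a Value.
		k := protoreflect.ValueOf("hash").MapKey()
		fmt.Println(k.String()) // hash

		// Mismatched conversions panic, e.g. protoreflect.ValueOf("hello").Int().
	}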
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
similarity index 97%
rename from vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
rename to vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
index 702ddf22a2..b1fdbe3e8e 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !purego && !appengine
-// +build !purego,!appengine
+//go:build !purego && !appengine && !go1.21
+// +build !purego,!appengine,!go1.21
 
 package protoreflect
 
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
new file mode 100644
index 0000000000..4354701117
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
@@ -0,0 +1,87 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !purego && !appengine && go1.21
+// +build !purego,!appengine,go1.21
+
+package protoreflect
+
+import (
+	"unsafe"
+
+	"google.golang.org/protobuf/internal/pragma"
+)
+
+type (
+	ifaceHeader struct {
+		_    [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it.
+		Type unsafe.Pointer
+		Data unsafe.Pointer
+	}
+)
+
+var (
+	nilType     = typeOf(nil)
+	boolType    = typeOf(*new(bool))
+	int32Type   = typeOf(*new(int32))
+	int64Type   = typeOf(*new(int64))
+	uint32Type  = typeOf(*new(uint32))
+	uint64Type  = typeOf(*new(uint64))
+	float32Type = typeOf(*new(float32))
+	float64Type = typeOf(*new(float64))
+	stringType  = typeOf(*new(string))
+	bytesType   = typeOf(*new([]byte))
+	enumType    = typeOf(*new(EnumNumber))
+)
+
+// typeOf returns a pointer to the Go type information.
+// The pointer is comparable and equal if and only if the types are identical.
+func typeOf(t interface{}) unsafe.Pointer {
+	return (*ifaceHeader)(unsafe.Pointer(&t)).Type
+}
+
+// value is a union where only one type can be represented at a time.
+// The struct is 24B large on 64-bit systems and requires the minimum storage
+// necessary to represent each possible type.
+//
+// The Go GC needs to be able to scan variables containing pointers.
+// As such, pointers and non-pointers cannot be intermixed.
+type value struct {
+	pragma.DoNotCompare // 0B
+
+	// typ stores the type of the value as a pointer to the Go type.
+	typ unsafe.Pointer // 8B
+
+	// ptr stores the data pointer for a String, Bytes, or interface value.
+	ptr unsafe.Pointer // 8B
+
+	// num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or
+	// Enum value as a raw uint64.
+	//
+	// It is also used to store the length of a String or Bytes value;
+	// the capacity is ignored.
+	num uint64 // 8B
+}
+
+func valueOfString(v string) Value {
+	return Value{typ: stringType, ptr: unsafe.Pointer(unsafe.StringData(v)), num: uint64(len(v))}
+}
+func valueOfBytes(v []byte) Value {
+	return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))}
+}
+func valueOfIface(v interface{}) Value {
+	p := (*ifaceHeader)(unsafe.Pointer(&v))
+	return Value{typ: p.Type, ptr: p.Data}
+}
+
+func (v Value) getString() string {
+	return unsafe.String((*byte)(v.ptr), v.num)
+}
+func (v Value) getBytes() []byte {
+	return unsafe.Slice((*byte)(v.ptr), v.num)
+}
+func (v Value) getIface() (x interface{}) {
+	*(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
+	return x
+}
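
For context on the build-tag split above, a minimal sketch of the zero-copy conversion the go1.21 variant relies on; unsafe.StringData and unsafe.String are standard-library functions (available since Go 1.20), and the snippet is illustrative rather than taken from the patch.

	package main

	import (
		"fmt"
		"unsafe"
	)

	func main() {
		s := "hello"
		// Capture the pointer to the string's backing bytes plus its length,
		// mirroring what valueOfString stores in the value struct.
		ptr := unsafe.StringData(s)
		n := len(s)

		// Rebuild an equal string from the same pointer and length (no copy),
		// mirroring getString.
		s2 := unsafe.String(ptr, n)
		fmt.Println(s2 == s) // true
	}
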
diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
index aeb5597744..6267dc52a6 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
@@ -5,12 +5,12 @@
 // Package protoregistry provides data structures to register and lookup
 // protobuf descriptor types.
 //
-// The Files registry contains file descriptors and provides the ability
+// The [Files] registry contains file descriptors and provides the ability
 // to iterate over the files or lookup a specific descriptor within the files.
-// Files only contains protobuf descriptors and has no understanding of Go
+// [Files] only contains protobuf descriptors and has no understanding of Go
 // type information that may be associated with each descriptor.
 //
-// The Types registry contains descriptor types for which there is a known
+// The [Types] registry contains descriptor types for which there is a known
 // Go type associated with that descriptor. It provides the ability to iterate
 // over the registered types or lookup a type by name.
 package protoregistry
@@ -218,7 +218,7 @@ func (r *Files) checkGenProtoConflict(path string) {
 
 // FindDescriptorByName looks up a descriptor by the full name.
 //
-// This returns (nil, NotFound) if not found.
+// This returns (nil, [NotFound]) if not found.
 func (r *Files) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) {
 	if r == nil {
 		return nil, NotFound
@@ -310,7 +310,7 @@ func (s *nameSuffix) Pop() (name protoreflect.Name) {
 
 // FindFileByPath looks up a file by the path.
 //
-// This returns (nil, NotFound) if not found.
+// This returns (nil, [NotFound]) if not found.
 // This returns an error if multiple files have the same path.
 func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
 	if r == nil {
@@ -431,7 +431,7 @@ func rangeTopLevelDescriptors(fd protoreflect.FileDescriptor, f func(protoreflec
 // A compliant implementation must deterministically return the same type
 // if no error is encountered.
 //
-// The Types type implements this interface.
+// The [Types] type implements this interface.
 type MessageTypeResolver interface {
 	// FindMessageByName looks up a message by its full name.
 	// E.g., "google.protobuf.Any"
@@ -451,7 +451,7 @@ type MessageTypeResolver interface {
 // A compliant implementation must deterministically return the same type
 // if no error is encountered.
 //
-// The Types type implements this interface.
+// The [Types] type implements this interface.
 type ExtensionTypeResolver interface {
 	// FindExtensionByName looks up an extension field by the field's full name.
 	// Note that this is the full name of the field as determined by
@@ -590,7 +590,7 @@ func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interfac
 // FindEnumByName looks up an enum by its full name.
 // E.g., "google.protobuf.Field.Kind".
 //
-// This returns (nil, NotFound) if not found.
+// This returns (nil, [NotFound]) if not found.
 func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumType, error) {
 	if r == nil {
 		return nil, NotFound
@@ -611,7 +611,7 @@ func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumTyp
 // FindMessageByName looks up a message by its full name,
 // e.g. "google.protobuf.Any".
 //
-// This returns (nil, NotFound) if not found.
+// This returns (nil, [NotFound]) if not found.
 func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
 	if r == nil {
 		return nil, NotFound
@@ -632,7 +632,7 @@ func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.M
 // FindMessageByURL looks up a message by a URL identifier.
 // See documentation on google.protobuf.Any.type_url for the URL format.
 //
-// This returns (nil, NotFound) if not found.
+// This returns (nil, [NotFound]) if not found.
 func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) {
 	// This function is similar to FindMessageByName but
 	// truncates anything before and including '/' in the URL.
@@ -662,7 +662,7 @@ func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) {
 // where the extension is declared and is unrelated to the full name of the
 // message being extended.
 //
-// This returns (nil, NotFound) if not found.
+// This returns (nil, [NotFound]) if not found.
 func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
 	if r == nil {
 		return nil, NotFound
@@ -703,7 +703,7 @@ func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.E
 // FindExtensionByNumber looks up an extension field by the field number
 // within some parent message, identified by full name.
 //
-// This returns (nil, NotFound) if not found.
+// This returns (nil, [NotFound]) if not found.
 func (r *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
 	if r == nil {
 		return nil, NotFound
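
An illustrative sketch (not part of the patch) of the (nil, [NotFound]) convention documented in these hunks, using the global type registry; "example.DoesNotExist" is a placeholder name assumed not to be registered.

	package main

	import (
		"errors"
		"fmt"

		"google.golang.org/protobuf/reflect/protoregistry"
	)

	func main() {
		// A missing entry is reported as the sentinel error protoregistry.NotFound.
		mt, err := protoregistry.GlobalTypes.FindMessageByName("example.DoesNotExist")
		if errors.Is(err, protoregistry.NotFound) {
			fmt.Println("message type is not registered")
			return
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(mt.Descriptor().FullName())
	}
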
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index 04c00f737c..38daa858d0 100644
--- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -48,6 +48,94 @@ import (
 	sync "sync"
 )
 
+// The full set of known editions.
+type Edition int32
+
+const (
+	// A placeholder for an unknown edition value.
+	Edition_EDITION_UNKNOWN Edition = 0
+	// Legacy syntax "editions".  These pre-date editions, but behave much like
+	// distinct editions.  These can't be used to specify the edition of proto
+	// files, but feature definitions must supply proto2/proto3 defaults for
+	// backwards compatibility.
+	Edition_EDITION_PROTO2 Edition = 998
+	Edition_EDITION_PROTO3 Edition = 999
+	// Editions that have been released.  The specific values are arbitrary and
+	// should not be depended on, but they will always be time-ordered for easy
+	// comparison.
+	Edition_EDITION_2023 Edition = 1000
+	// Placeholder editions for testing feature resolution.  These should not be
+	// used or relied on outside of tests.
+	Edition_EDITION_1_TEST_ONLY     Edition = 1
+	Edition_EDITION_2_TEST_ONLY     Edition = 2
+	Edition_EDITION_99997_TEST_ONLY Edition = 99997
+	Edition_EDITION_99998_TEST_ONLY Edition = 99998
+	Edition_EDITION_99999_TEST_ONLY Edition = 99999
+)
+
+// Enum value maps for Edition.
+var (
+	Edition_name = map[int32]string{
+		0:     "EDITION_UNKNOWN",
+		998:   "EDITION_PROTO2",
+		999:   "EDITION_PROTO3",
+		1000:  "EDITION_2023",
+		1:     "EDITION_1_TEST_ONLY",
+		2:     "EDITION_2_TEST_ONLY",
+		99997: "EDITION_99997_TEST_ONLY",
+		99998: "EDITION_99998_TEST_ONLY",
+		99999: "EDITION_99999_TEST_ONLY",
+	}
+	Edition_value = map[string]int32{
+		"EDITION_UNKNOWN":         0,
+		"EDITION_PROTO2":          998,
+		"EDITION_PROTO3":          999,
+		"EDITION_2023":            1000,
+		"EDITION_1_TEST_ONLY":     1,
+		"EDITION_2_TEST_ONLY":     2,
+		"EDITION_99997_TEST_ONLY": 99997,
+		"EDITION_99998_TEST_ONLY": 99998,
+		"EDITION_99999_TEST_ONLY": 99999,
+	}
+)
+
+func (x Edition) Enum() *Edition {
+	p := new(Edition)
+	*p = x
+	return p
+}
+
+func (x Edition) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Edition) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor()
+}
+
+func (Edition) Type() protoreflect.EnumType {
+	return &file_google_protobuf_descriptor_proto_enumTypes[0]
+}
+
+func (x Edition) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *Edition) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = Edition(num)
+	return nil
+}
+
+// Deprecated: Use Edition.Descriptor instead.
+func (Edition) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0}
+}
+
 // The verification state of the extension range.
 type ExtensionRangeOptions_VerificationState int32
 
@@ -80,11 +168,11 @@ func (x ExtensionRangeOptions_VerificationState) String() string {
 }
 
 func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[0].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
 }
 
 func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[0]
+	return &file_google_protobuf_descriptor_proto_enumTypes[1]
 }
 
 func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber {
@@ -125,9 +213,10 @@ const (
 	FieldDescriptorProto_TYPE_BOOL    FieldDescriptorProto_Type = 8
 	FieldDescriptorProto_TYPE_STRING  FieldDescriptorProto_Type = 9
 	// Tag-delimited aggregate.
-	// Group type is deprecated and not supported in proto3. However, Proto3
+	// Group type is deprecated and not supported after proto2. However, Proto3
 	// implementations should still be able to parse the group wire format and
-	// treat group fields as unknown fields.
+	// treat group fields as unknown fields.  In Editions, the group wire format
+	// can be enabled via the `message_encoding` feature.
 	FieldDescriptorProto_TYPE_GROUP   FieldDescriptorProto_Type = 10
 	FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // Length-delimited aggregate.
 	// New in version 2.
@@ -195,11 +284,11 @@ func (x FieldDescriptorProto_Type) String() string {
 }
 
 func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
 }
 
 func (FieldDescriptorProto_Type) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[1]
+	return &file_google_protobuf_descriptor_proto_enumTypes[2]
 }
 
 func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber {
@@ -226,21 +315,24 @@ type FieldDescriptorProto_Label int32
 const (
 	// 0 is reserved for errors
 	FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1
-	FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2
 	FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3
+	// The required label is only allowed in proto2.  In proto3 and Editions
+	// it's explicitly prohibited.  In Editions, the `field_presence` feature
+	// can be used to get this behavior.
+	FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2
 )
 
 // Enum value maps for FieldDescriptorProto_Label.
 var (
 	FieldDescriptorProto_Label_name = map[int32]string{
 		1: "LABEL_OPTIONAL",
-		2: "LABEL_REQUIRED",
 		3: "LABEL_REPEATED",
+		2: "LABEL_REQUIRED",
 	}
 	FieldDescriptorProto_Label_value = map[string]int32{
 		"LABEL_OPTIONAL": 1,
-		"LABEL_REQUIRED": 2,
 		"LABEL_REPEATED": 3,
+		"LABEL_REQUIRED": 2,
 	}
 )
 
@@ -255,11 +347,11 @@ func (x FieldDescriptorProto_Label) String() string {
 }
 
 func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
 }
 
 func (FieldDescriptorProto_Label) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[2]
+	return &file_google_protobuf_descriptor_proto_enumTypes[3]
 }
 
 func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber {
@@ -316,11 +408,11 @@ func (x FileOptions_OptimizeMode) String() string {
 }
 
 func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
 }
 
 func (FileOptions_OptimizeMode) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[3]
+	return &file_google_protobuf_descriptor_proto_enumTypes[4]
 }
 
 func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber {
@@ -382,11 +474,11 @@ func (x FieldOptions_CType) String() string {
 }
 
 func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
 }
 
 func (FieldOptions_CType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[4]
+	return &file_google_protobuf_descriptor_proto_enumTypes[5]
 }
 
 func (x FieldOptions_CType) Number() protoreflect.EnumNumber {
@@ -444,11 +536,11 @@ func (x FieldOptions_JSType) String() string {
 }
 
 func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
 }
 
 func (FieldOptions_JSType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[5]
+	return &file_google_protobuf_descriptor_proto_enumTypes[6]
 }
 
 func (x FieldOptions_JSType) Number() protoreflect.EnumNumber {
@@ -506,11 +598,11 @@ func (x FieldOptions_OptionRetention) String() string {
 }
 
 func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
 }
 
 func (FieldOptions_OptionRetention) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[6]
+	return &file_google_protobuf_descriptor_proto_enumTypes[7]
 }
 
 func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber {
@@ -590,11 +682,11 @@ func (x FieldOptions_OptionTargetType) String() string {
 }
 
 func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
 }
 
 func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[7]
+	return &file_google_protobuf_descriptor_proto_enumTypes[8]
 }
 
 func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber {
@@ -652,11 +744,11 @@ func (x MethodOptions_IdempotencyLevel) String() string {
 }
 
 func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
 }
 
 func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[8]
+	return &file_google_protobuf_descriptor_proto_enumTypes[9]
 }
 
 func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber {
@@ -678,6 +770,363 @@ func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) {
 	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{17, 0}
 }
 
+type FeatureSet_FieldPresence int32
+
+const (
+	FeatureSet_FIELD_PRESENCE_UNKNOWN FeatureSet_FieldPresence = 0
+	FeatureSet_EXPLICIT               FeatureSet_FieldPresence = 1
+	FeatureSet_IMPLICIT               FeatureSet_FieldPresence = 2
+	FeatureSet_LEGACY_REQUIRED        FeatureSet_FieldPresence = 3
+)
+
+// Enum value maps for FeatureSet_FieldPresence.
+var (
+	FeatureSet_FieldPresence_name = map[int32]string{
+		0: "FIELD_PRESENCE_UNKNOWN",
+		1: "EXPLICIT",
+		2: "IMPLICIT",
+		3: "LEGACY_REQUIRED",
+	}
+	FeatureSet_FieldPresence_value = map[string]int32{
+		"FIELD_PRESENCE_UNKNOWN": 0,
+		"EXPLICIT":               1,
+		"IMPLICIT":               2,
+		"LEGACY_REQUIRED":        3,
+	}
+)
+
+func (x FeatureSet_FieldPresence) Enum() *FeatureSet_FieldPresence {
+	p := new(FeatureSet_FieldPresence)
+	*p = x
+	return p
+}
+
+func (x FeatureSet_FieldPresence) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor()
+}
+
+func (FeatureSet_FieldPresence) Type() protoreflect.EnumType {
+	return &file_google_protobuf_descriptor_proto_enumTypes[10]
+}
+
+func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_FieldPresence) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = FeatureSet_FieldPresence(num)
+	return nil
+}
+
+// Deprecated: Use FeatureSet_FieldPresence.Descriptor instead.
+func (FeatureSet_FieldPresence) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0}
+}
+
+type FeatureSet_EnumType int32
+
+const (
+	FeatureSet_ENUM_TYPE_UNKNOWN FeatureSet_EnumType = 0
+	FeatureSet_OPEN              FeatureSet_EnumType = 1
+	FeatureSet_CLOSED            FeatureSet_EnumType = 2
+)
+
+// Enum value maps for FeatureSet_EnumType.
+var (
+	FeatureSet_EnumType_name = map[int32]string{
+		0: "ENUM_TYPE_UNKNOWN",
+		1: "OPEN",
+		2: "CLOSED",
+	}
+	FeatureSet_EnumType_value = map[string]int32{
+		"ENUM_TYPE_UNKNOWN": 0,
+		"OPEN":              1,
+		"CLOSED":            2,
+	}
+)
+
+func (x FeatureSet_EnumType) Enum() *FeatureSet_EnumType {
+	p := new(FeatureSet_EnumType)
+	*p = x
+	return p
+}
+
+func (x FeatureSet_EnumType) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor()
+}
+
+func (FeatureSet_EnumType) Type() protoreflect.EnumType {
+	return &file_google_protobuf_descriptor_proto_enumTypes[11]
+}
+
+func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_EnumType) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = FeatureSet_EnumType(num)
+	return nil
+}
+
+// Deprecated: Use FeatureSet_EnumType.Descriptor instead.
+func (FeatureSet_EnumType) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 1}
+}
+
+type FeatureSet_RepeatedFieldEncoding int32
+
+const (
+	FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN FeatureSet_RepeatedFieldEncoding = 0
+	FeatureSet_PACKED                          FeatureSet_RepeatedFieldEncoding = 1
+	FeatureSet_EXPANDED                        FeatureSet_RepeatedFieldEncoding = 2
+)
+
+// Enum value maps for FeatureSet_RepeatedFieldEncoding.
+var (
+	FeatureSet_RepeatedFieldEncoding_name = map[int32]string{
+		0: "REPEATED_FIELD_ENCODING_UNKNOWN",
+		1: "PACKED",
+		2: "EXPANDED",
+	}
+	FeatureSet_RepeatedFieldEncoding_value = map[string]int32{
+		"REPEATED_FIELD_ENCODING_UNKNOWN": 0,
+		"PACKED":                          1,
+		"EXPANDED":                        2,
+	}
+)
+
+func (x FeatureSet_RepeatedFieldEncoding) Enum() *FeatureSet_RepeatedFieldEncoding {
+	p := new(FeatureSet_RepeatedFieldEncoding)
+	*p = x
+	return p
+}
+
+func (x FeatureSet_RepeatedFieldEncoding) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor()
+}
+
+func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType {
+	return &file_google_protobuf_descriptor_proto_enumTypes[12]
+}
+
+func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_RepeatedFieldEncoding) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = FeatureSet_RepeatedFieldEncoding(num)
+	return nil
+}
+
+// Deprecated: Use FeatureSet_RepeatedFieldEncoding.Descriptor instead.
+func (FeatureSet_RepeatedFieldEncoding) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 2}
+}
+
+type FeatureSet_Utf8Validation int32
+
+const (
+	FeatureSet_UTF8_VALIDATION_UNKNOWN FeatureSet_Utf8Validation = 0
+	FeatureSet_NONE                    FeatureSet_Utf8Validation = 1
+	FeatureSet_VERIFY                  FeatureSet_Utf8Validation = 2
+)
+
+// Enum value maps for FeatureSet_Utf8Validation.
+var (
+	FeatureSet_Utf8Validation_name = map[int32]string{
+		0: "UTF8_VALIDATION_UNKNOWN",
+		1: "NONE",
+		2: "VERIFY",
+	}
+	FeatureSet_Utf8Validation_value = map[string]int32{
+		"UTF8_VALIDATION_UNKNOWN": 0,
+		"NONE":                    1,
+		"VERIFY":                  2,
+	}
+)
+
+func (x FeatureSet_Utf8Validation) Enum() *FeatureSet_Utf8Validation {
+	p := new(FeatureSet_Utf8Validation)
+	*p = x
+	return p
+}
+
+func (x FeatureSet_Utf8Validation) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor()
+}
+
+func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType {
+	return &file_google_protobuf_descriptor_proto_enumTypes[13]
+}
+
+func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_Utf8Validation) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = FeatureSet_Utf8Validation(num)
+	return nil
+}
+
+// Deprecated: Use FeatureSet_Utf8Validation.Descriptor instead.
+func (FeatureSet_Utf8Validation) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 3}
+}
+
+type FeatureSet_MessageEncoding int32
+
+const (
+	FeatureSet_MESSAGE_ENCODING_UNKNOWN FeatureSet_MessageEncoding = 0
+	FeatureSet_LENGTH_PREFIXED          FeatureSet_MessageEncoding = 1
+	FeatureSet_DELIMITED                FeatureSet_MessageEncoding = 2
+)
+
+// Enum value maps for FeatureSet_MessageEncoding.
+var (
+	FeatureSet_MessageEncoding_name = map[int32]string{
+		0: "MESSAGE_ENCODING_UNKNOWN",
+		1: "LENGTH_PREFIXED",
+		2: "DELIMITED",
+	}
+	FeatureSet_MessageEncoding_value = map[string]int32{
+		"MESSAGE_ENCODING_UNKNOWN": 0,
+		"LENGTH_PREFIXED":          1,
+		"DELIMITED":                2,
+	}
+)
+
+func (x FeatureSet_MessageEncoding) Enum() *FeatureSet_MessageEncoding {
+	p := new(FeatureSet_MessageEncoding)
+	*p = x
+	return p
+}
+
+func (x FeatureSet_MessageEncoding) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor()
+}
+
+func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType {
+	return &file_google_protobuf_descriptor_proto_enumTypes[14]
+}
+
+func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_MessageEncoding) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = FeatureSet_MessageEncoding(num)
+	return nil
+}
+
+// Deprecated: Use FeatureSet_MessageEncoding.Descriptor instead.
+func (FeatureSet_MessageEncoding) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 4}
+}
+
+type FeatureSet_JsonFormat int32
+
+const (
+	FeatureSet_JSON_FORMAT_UNKNOWN FeatureSet_JsonFormat = 0
+	FeatureSet_ALLOW               FeatureSet_JsonFormat = 1
+	FeatureSet_LEGACY_BEST_EFFORT  FeatureSet_JsonFormat = 2
+)
+
+// Enum value maps for FeatureSet_JsonFormat.
+var (
+	FeatureSet_JsonFormat_name = map[int32]string{
+		0: "JSON_FORMAT_UNKNOWN",
+		1: "ALLOW",
+		2: "LEGACY_BEST_EFFORT",
+	}
+	FeatureSet_JsonFormat_value = map[string]int32{
+		"JSON_FORMAT_UNKNOWN": 0,
+		"ALLOW":               1,
+		"LEGACY_BEST_EFFORT":  2,
+	}
+)
+
+func (x FeatureSet_JsonFormat) Enum() *FeatureSet_JsonFormat {
+	p := new(FeatureSet_JsonFormat)
+	*p = x
+	return p
+}
+
+func (x FeatureSet_JsonFormat) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor()
+}
+
+func (FeatureSet_JsonFormat) Type() protoreflect.EnumType {
+	return &file_google_protobuf_descriptor_proto_enumTypes[15]
+}
+
+func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_JsonFormat) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = FeatureSet_JsonFormat(num)
+	return nil
+}
+
+// Deprecated: Use FeatureSet_JsonFormat.Descriptor instead.
+func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5}
+}
+
 // Represents the identified object's effect on the element in the original
 // .proto file.
 type GeneratedCodeInfo_Annotation_Semantic int32
@@ -716,11 +1165,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string {
 }
 
 func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor()
 }
 
 func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[9]
+	return &file_google_protobuf_descriptor_proto_enumTypes[16]
 }
 
 func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber {
@@ -739,7 +1188,7 @@ func (x *GeneratedCodeInfo_Annotation_Semantic) UnmarshalJSON(b []byte) error {
 
 // Deprecated: Use GeneratedCodeInfo_Annotation_Semantic.Descriptor instead.
 func (GeneratedCodeInfo_Annotation_Semantic) EnumDescriptor() ([]byte, []int) {
-	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0, 0}
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0, 0}
 }
 
 // The protocol compiler can output a FileDescriptorSet containing the .proto
@@ -822,8 +1271,8 @@ type FileDescriptorProto struct {
 	//
 	// If `edition` is present, this value must be "editions".
 	Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
-	// The edition of the proto file, which is an opaque string.
-	Edition *string `protobuf:"bytes,13,opt,name=edition" json:"edition,omitempty"`
+	// The edition of the proto file.
+	Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
 }
 
 func (x *FileDescriptorProto) Reset() {
@@ -942,11 +1391,11 @@ func (x *FileDescriptorProto) GetSyntax() string {
 	return ""
 }
 
-func (x *FileDescriptorProto) GetEdition() string {
+func (x *FileDescriptorProto) GetEdition() Edition {
 	if x != nil && x.Edition != nil {
 		return *x.Edition
 	}
-	return ""
+	return Edition_EDITION_UNKNOWN
 }
 
 // Describes a message type.
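
A small sketch (illustrative, not from the patch) of the change above: FileDescriptorProto.Edition is now the Edition enum rather than an opaque string, and GetEdition falls back to EDITION_UNKNOWN when the field is unset.

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/proto"
		"google.golang.org/protobuf/types/descriptorpb"
	)

	func main() {
		fd := &descriptorpb.FileDescriptorProto{
			Name:    proto.String("example.proto"),
			Syntax:  proto.String("editions"),
			Edition: descriptorpb.Edition_EDITION_2023.Enum(),
		}
		fmt.Println(fd.GetEdition()) // EDITION_2023

		// An unset edition reports the enum's zero value.
		fmt.Println((&descriptorpb.FileDescriptorProto{}).GetEdition()) // EDITION_UNKNOWN
	}
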
@@ -1079,13 +1528,14 @@ type ExtensionRangeOptions struct {
 
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
-	// go/protobuf-stripping-extension-declarations
-	// Like Metadata, but we use a repeated field to hold all extension
-	// declarations. This should avoid the size increases of transforming a large
-	// extension range into small ranges in generated binaries.
+	// For external users: DO NOT USE. We are in the process of open sourcing
+	// extension declaration and executing internal cleanups before it can be
+	// used externally.
 	Declaration []*ExtensionRangeOptions_Declaration `protobuf:"bytes,2,rep,name=declaration" json:"declaration,omitempty"`
+	// Any features defined in the specific edition.
+	Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"`
 	// The verification state of the range.
-	// TODO(b/278783756): flip the default to DECLARATION once all empty ranges
+	// TODO: flip the default to DECLARATION once all empty ranges
 	// are marked as UNVERIFIED.
 	Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"`
 }
@@ -1141,6 +1591,13 @@ func (x *ExtensionRangeOptions) GetDeclaration() []*ExtensionRangeOptions_Declar
 	return nil
 }
 
+func (x *ExtensionRangeOptions) GetFeatures() *FeatureSet {
+	if x != nil {
+		return x.Features
+	}
+	return nil
+}
+
 func (x *ExtensionRangeOptions) GetVerification() ExtensionRangeOptions_VerificationState {
 	if x != nil && x.Verification != nil {
 		return *x.Verification
@@ -1772,6 +2229,8 @@ type FileOptions struct {
 	// is empty. When this option is not set, the package name will be used for
 	// determining the ruby package.
 	RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
+	// Any features defined in the specific edition.
+	Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here.
 	// See the documentation for the "Options" section above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -1963,6 +2422,13 @@ func (x *FileOptions) GetRubyPackage() string {
 	return ""
 }
 
+func (x *FileOptions) GetFeatures() *FeatureSet {
+	if x != nil {
+		return x.Features
+	}
+	return nil
+}
+
 func (x *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
 	if x != nil {
 		return x.UninterpretedOption
@@ -2039,11 +2505,13 @@ type MessageOptions struct {
 	// This should only be used as a temporary measure against broken builds due
 	// to the change in behavior for JSON field name conflicts.
 	//
-	// TODO(b/261750190) This is legacy behavior we plan to remove once downstream
+	// TODO This is legacy behavior we plan to remove once downstream
 	// teams have had time to migrate.
 	//
 	// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
 	DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
+	// Any features defined in the specific edition.
+	Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
 }
@@ -2123,6 +2591,13 @@ func (x *MessageOptions) GetDeprecatedLegacyJsonFieldConflicts() bool {
 	return false
 }
 
+func (x *MessageOptions) GetFeatures() *FeatureSet {
+	if x != nil {
+		return x.Features
+	}
+	return nil
+}
+
 func (x *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
 	if x != nil {
 		return x.UninterpretedOption
@@ -2147,7 +2622,9 @@ type FieldOptions struct {
 	// a more efficient representation on the wire. Rather than repeatedly
 	// writing the tag and type for each element, the entire array is encoded as
 	// a single length-delimited blob. In proto3, only explicitly setting it to
-	// false will avoid using packed encoding.
+	// false will avoid using packed encoding.  This option is prohibited in
+	// Editions, but the `repeated_field_encoding` feature can be used to control
+	// the behavior.
 	Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
 	// The jstype option determines the JavaScript type used for values of the
 	// field.  The option is permitted only for 64 bit integral and fixed types
@@ -2205,11 +2682,12 @@ type FieldOptions struct {
 	Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
 	// Indicate that the field value should not be printed out when using debug
 	// formats, e.g. when the field contains sensitive credentials.
-	DebugRedact *bool                         `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"`
-	Retention   *FieldOptions_OptionRetention `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"`
-	// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
-	Target  *FieldOptions_OptionTargetType  `protobuf:"varint,18,opt,name=target,enum=google.protobuf.FieldOptions_OptionTargetType" json:"target,omitempty"`
-	Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"`
+	DebugRedact     *bool                           `protobuf:"varint,16,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"`
+	Retention       *FieldOptions_OptionRetention   `protobuf:"varint,17,opt,name=retention,enum=google.protobuf.FieldOptions_OptionRetention" json:"retention,omitempty"`
+	Targets         []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"`
+	EditionDefaults []*FieldOptions_EditionDefault  `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"`
+	// Any features defined in the specific edition.
+	Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
 }
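
A minimal sketch (illustrative, not from the patch) of the features field added to FieldOptions above, paired with the FeatureSet getters defined earlier in this file; unset features fall back to their *_UNKNOWN zero values.

	package main

	import (
		"fmt"

		"google.golang.org/protobuf/types/descriptorpb"
	)

	func main() {
		opts := &descriptorpb.FieldOptions{
			Features: &descriptorpb.FeatureSet{
				FieldPresence: descriptorpb.FeatureSet_IMPLICIT.Enum(),
			},
		}
		fmt.Println(opts.GetFeatures().GetFieldPresence()) // IMPLICIT
		fmt.Println(opts.GetFeatures().GetEnumType())      // ENUM_TYPE_UNKNOWN
	}
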
@@ -2320,14 +2798,6 @@ func (x *FieldOptions) GetRetention() FieldOptions_OptionRetention {
 	return FieldOptions_RETENTION_UNKNOWN
 }
 
-// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
-func (x *FieldOptions) GetTarget() FieldOptions_OptionTargetType {
-	if x != nil && x.Target != nil {
-		return *x.Target
-	}
-	return FieldOptions_TARGET_TYPE_UNKNOWN
-}
-
 func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType {
 	if x != nil {
 		return x.Targets
@@ -2335,6 +2805,20 @@ func (x *FieldOptions) GetTargets() []FieldOptions_OptionTargetType {
 	return nil
 }
 
+func (x *FieldOptions) GetEditionDefaults() []*FieldOptions_EditionDefault {
+	if x != nil {
+		return x.EditionDefaults
+	}
+	return nil
+}
+
+func (x *FieldOptions) GetFeatures() *FeatureSet {
+	if x != nil {
+		return x.Features
+	}
+	return nil
+}
+
 func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
 	if x != nil {
 		return x.UninterpretedOption
@@ -2348,6 +2832,8 @@ type OneofOptions struct {
 	unknownFields   protoimpl.UnknownFields
 	extensionFields protoimpl.ExtensionFields
 
+	// Any features defined in the specific edition.
+	Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
 }
@@ -2384,6 +2870,13 @@ func (*OneofOptions) Descriptor() ([]byte, []int) {
 	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{13}
 }
 
+func (x *OneofOptions) GetFeatures() *FeatureSet {
+	if x != nil {
+		return x.Features
+	}
+	return nil
+}
+
 func (x *OneofOptions) GetUninterpretedOption() []*UninterpretedOption {
 	if x != nil {
 		return x.UninterpretedOption
@@ -2409,11 +2902,13 @@ type EnumOptions struct {
 	// and strips underscores from the fields before comparison in proto3 only.
 	// The new behavior takes `json_name` into account and applies to proto2 as
 	// well.
-	// TODO(b/261750190) Remove this legacy behavior once downstream teams have
+	// TODO Remove this legacy behavior once downstream teams have
 	// had time to migrate.
 	//
 	// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
 	DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
+	// Any features defined in the specific edition.
+	Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
 }
@@ -2477,6 +2972,13 @@ func (x *EnumOptions) GetDeprecatedLegacyJsonFieldConflicts() bool {
 	return false
 }
 
+func (x *EnumOptions) GetFeatures() *FeatureSet {
+	if x != nil {
+		return x.Features
+	}
+	return nil
+}
+
 func (x *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
 	if x != nil {
 		return x.UninterpretedOption
@@ -2495,13 +2997,20 @@ type EnumValueOptions struct {
 	// for the enum value, or it will be completely ignored; in the very least,
 	// this is a formalization for deprecating enum values.
 	Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// Any features defined in the specific edition.
+	Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"`
+	// Indicate that fields annotated with this enum value should not be printed
+	// out when using debug formats, e.g. when the field contains sensitive
+	// credentials.
+	DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
 }
 
 // Default values for EnumValueOptions fields.
 const (
-	Default_EnumValueOptions_Deprecated = bool(false)
+	Default_EnumValueOptions_Deprecated  = bool(false)
+	Default_EnumValueOptions_DebugRedact = bool(false)
 )
 
 func (x *EnumValueOptions) Reset() {
@@ -2543,6 +3052,20 @@ func (x *EnumValueOptions) GetDeprecated() bool {
 	return Default_EnumValueOptions_Deprecated
 }
 
+func (x *EnumValueOptions) GetFeatures() *FeatureSet {
+	if x != nil {
+		return x.Features
+	}
+	return nil
+}
+
+func (x *EnumValueOptions) GetDebugRedact() bool {
+	if x != nil && x.DebugRedact != nil {
+		return *x.DebugRedact
+	}
+	return Default_EnumValueOptions_DebugRedact
+}
+
 func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
 	if x != nil {
 		return x.UninterpretedOption
@@ -2556,6 +3079,8 @@ type ServiceOptions struct {
 	unknownFields   protoimpl.UnknownFields
 	extensionFields protoimpl.ExtensionFields
 
+	// Any features defined in the specific edition.
+	Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"`
 	// Is this service deprecated?
 	// Depending on the target platform, this can emit Deprecated annotations
 	// for the service, or it will be completely ignored; in the very least,
@@ -2602,6 +3127,13 @@ func (*ServiceOptions) Descriptor() ([]byte, []int) {
 	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{16}
 }
 
+func (x *ServiceOptions) GetFeatures() *FeatureSet {
+	if x != nil {
+		return x.Features
+	}
+	return nil
+}
+
 func (x *ServiceOptions) GetDeprecated() bool {
 	if x != nil && x.Deprecated != nil {
 		return *x.Deprecated
@@ -2628,6 +3160,8 @@ type MethodOptions struct {
 	// this is a formalization for deprecating methods.
 	Deprecated       *bool                           `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
 	IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
+	// Any features defined in the specific edition.
+	Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
 }
@@ -2684,6 +3218,13 @@ func (x *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel {
 	return Default_MethodOptions_IdempotencyLevel
 }
 
+func (x *MethodOptions) GetFeatures() *FeatureSet {
+	if x != nil {
+		return x.Features
+	}
+	return nil
+}
+
 func (x *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
 	if x != nil {
 		return x.UninterpretedOption
@@ -2794,6 +3335,171 @@ func (x *UninterpretedOption) GetAggregateValue() string {
 	return ""
 }
 
+// TODO Enums in C++ gencode (and potentially other languages) are
+// not well scoped.  This means that each of the feature enums below can clash
+// with each other.  The short names we've chosen maximize call-site
+// readability, but leave us very open to this scenario.  A future feature will
+// be designed and implemented to handle this, hopefully before we ever hit a
+// conflict here.
+type FeatureSet struct {
+	state           protoimpl.MessageState
+	sizeCache       protoimpl.SizeCache
+	unknownFields   protoimpl.UnknownFields
+	extensionFields protoimpl.ExtensionFields
+
+	FieldPresence         *FeatureSet_FieldPresence         `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
+	EnumType              *FeatureSet_EnumType              `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
+	RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
+	Utf8Validation        *FeatureSet_Utf8Validation        `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
+	MessageEncoding       *FeatureSet_MessageEncoding       `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
+	JsonFormat            *FeatureSet_JsonFormat            `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
+}
+
+func (x *FeatureSet) Reset() {
+	*x = FeatureSet{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *FeatureSet) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FeatureSet) ProtoMessage() {}
+
+func (x *FeatureSet) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FeatureSet.ProtoReflect.Descriptor instead.
+func (*FeatureSet) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19}
+}
+
+func (x *FeatureSet) GetFieldPresence() FeatureSet_FieldPresence {
+	if x != nil && x.FieldPresence != nil {
+		return *x.FieldPresence
+	}
+	return FeatureSet_FIELD_PRESENCE_UNKNOWN
+}
+
+func (x *FeatureSet) GetEnumType() FeatureSet_EnumType {
+	if x != nil && x.EnumType != nil {
+		return *x.EnumType
+	}
+	return FeatureSet_ENUM_TYPE_UNKNOWN
+}
+
+func (x *FeatureSet) GetRepeatedFieldEncoding() FeatureSet_RepeatedFieldEncoding {
+	if x != nil && x.RepeatedFieldEncoding != nil {
+		return *x.RepeatedFieldEncoding
+	}
+	return FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN
+}
+
+func (x *FeatureSet) GetUtf8Validation() FeatureSet_Utf8Validation {
+	if x != nil && x.Utf8Validation != nil {
+		return *x.Utf8Validation
+	}
+	return FeatureSet_UTF8_VALIDATION_UNKNOWN
+}
+
+func (x *FeatureSet) GetMessageEncoding() FeatureSet_MessageEncoding {
+	if x != nil && x.MessageEncoding != nil {
+		return *x.MessageEncoding
+	}
+	return FeatureSet_MESSAGE_ENCODING_UNKNOWN
+}
+
+func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat {
+	if x != nil && x.JsonFormat != nil {
+		return *x.JsonFormat
+	}
+	return FeatureSet_JSON_FORMAT_UNKNOWN
+}
+
+// A compiled specification for the defaults of a set of features.  These
+// messages are generated from FeatureSet extensions and can be used to seed
+// feature resolution. The resolution with this object becomes a simple search
+// for the closest matching edition, followed by proto merges.
+type FeatureSetDefaults struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"`
+	// The minimum supported edition (inclusive) when this was constructed.
+	// Editions before this will not have defaults.
+	MinimumEdition *Edition `protobuf:"varint,4,opt,name=minimum_edition,json=minimumEdition,enum=google.protobuf.Edition" json:"minimum_edition,omitempty"`
+	// The maximum known edition (inclusive) when this was constructed. Editions
+	// after this will not have reliable defaults.
+	MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"`
+}
+
+func (x *FeatureSetDefaults) Reset() {
+	*x = FeatureSetDefaults{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *FeatureSetDefaults) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FeatureSetDefaults) ProtoMessage() {}
+
+func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FeatureSetDefaults.ProtoReflect.Descriptor instead.
+func (*FeatureSetDefaults) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20}
+}
+
+func (x *FeatureSetDefaults) GetDefaults() []*FeatureSetDefaults_FeatureSetEditionDefault {
+	if x != nil {
+		return x.Defaults
+	}
+	return nil
+}
+
+func (x *FeatureSetDefaults) GetMinimumEdition() Edition {
+	if x != nil && x.MinimumEdition != nil {
+		return *x.MinimumEdition
+	}
+	return Edition_EDITION_UNKNOWN
+}
+
+func (x *FeatureSetDefaults) GetMaximumEdition() Edition {
+	if x != nil && x.MaximumEdition != nil {
+		return *x.MaximumEdition
+	}
+	return Edition_EDITION_UNKNOWN
+}
+
 // Encapsulates information about the original source file from which a
 // FileDescriptorProto was generated.
 type SourceCodeInfo struct {
@@ -2855,7 +3561,7 @@ type SourceCodeInfo struct {
 func (x *SourceCodeInfo) Reset() {
 	*x = SourceCodeInfo{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -2868,7 +3574,7 @@ func (x *SourceCodeInfo) String() string {
 func (*SourceCodeInfo) ProtoMessage() {}
 
 func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -2881,7 +3587,7 @@ func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use SourceCodeInfo.ProtoReflect.Descriptor instead.
 func (*SourceCodeInfo) Descriptor() ([]byte, []int) {
-	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19}
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21}
 }
 
 func (x *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
@@ -2907,7 +3613,7 @@ type GeneratedCodeInfo struct {
 func (x *GeneratedCodeInfo) Reset() {
 	*x = GeneratedCodeInfo{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -2920,7 +3626,7 @@ func (x *GeneratedCodeInfo) String() string {
 func (*GeneratedCodeInfo) ProtoMessage() {}
 
 func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -2933,7 +3639,7 @@ func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use GeneratedCodeInfo.ProtoReflect.Descriptor instead.
 func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) {
-	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20}
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22}
 }
 
 func (x *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
@@ -2956,7 +3662,7 @@ type DescriptorProto_ExtensionRange struct {
 func (x *DescriptorProto_ExtensionRange) Reset() {
 	*x = DescriptorProto_ExtensionRange{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -2969,7 +3675,7 @@ func (x *DescriptorProto_ExtensionRange) String() string {
 func (*DescriptorProto_ExtensionRange) ProtoMessage() {}
 
 func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -3021,7 +3727,7 @@ type DescriptorProto_ReservedRange struct {
 func (x *DescriptorProto_ReservedRange) Reset() {
 	*x = DescriptorProto_ReservedRange{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -3034,7 +3740,7 @@ func (x *DescriptorProto_ReservedRange) String() string {
 func (*DescriptorProto_ReservedRange) ProtoMessage() {}
 
 func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -3078,10 +3784,6 @@ type ExtensionRangeOptions_Declaration struct {
 	// Metadata.type, Declaration.type must have a leading dot for messages
 	// and enums.
 	Type *string `protobuf:"bytes,3,opt,name=type" json:"type,omitempty"`
-	// Deprecated. Please use "repeated".
-	//
-	// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
-	IsRepeated *bool `protobuf:"varint,4,opt,name=is_repeated,json=isRepeated" json:"is_repeated,omitempty"`
 	// If true, indicates that the number is reserved in the extension range,
 	// and any extension field with the number will fail to compile. Set this
 	// when a declared extension field is deleted.
@@ -3094,7 +3796,7 @@ type ExtensionRangeOptions_Declaration struct {
 func (x *ExtensionRangeOptions_Declaration) Reset() {
 	*x = ExtensionRangeOptions_Declaration{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -3107,7 +3809,7 @@ func (x *ExtensionRangeOptions_Declaration) String() string {
 func (*ExtensionRangeOptions_Declaration) ProtoMessage() {}
 
 func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -3144,14 +3846,6 @@ func (x *ExtensionRangeOptions_Declaration) GetType() string {
 	return ""
 }
 
-// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
-func (x *ExtensionRangeOptions_Declaration) GetIsRepeated() bool {
-	if x != nil && x.IsRepeated != nil {
-		return *x.IsRepeated
-	}
-	return false
-}
-
 func (x *ExtensionRangeOptions_Declaration) GetReserved() bool {
 	if x != nil && x.Reserved != nil {
 		return *x.Reserved
@@ -3184,7 +3878,7 @@ type EnumDescriptorProto_EnumReservedRange struct {
 func (x *EnumDescriptorProto_EnumReservedRange) Reset() {
 	*x = EnumDescriptorProto_EnumReservedRange{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -3197,7 +3891,7 @@ func (x *EnumDescriptorProto_EnumReservedRange) String() string {
 func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {}
 
 func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -3227,6 +3921,61 @@ func (x *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 {
 	return 0
 }
 
+type FieldOptions_EditionDefault struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
+	Value   *string  `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value.
+}
+
+func (x *FieldOptions_EditionDefault) Reset() {
+	*x = FieldOptions_EditionDefault{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *FieldOptions_EditionDefault) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldOptions_EditionDefault) ProtoMessage() {}
+
+func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldOptions_EditionDefault.ProtoReflect.Descriptor instead.
+func (*FieldOptions_EditionDefault) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 0}
+}
+
+func (x *FieldOptions_EditionDefault) GetEdition() Edition {
+	if x != nil && x.Edition != nil {
+		return *x.Edition
+	}
+	return Edition_EDITION_UNKNOWN
+}
+
+func (x *FieldOptions_EditionDefault) GetValue() string {
+	if x != nil && x.Value != nil {
+		return *x.Value
+	}
+	return ""
+}
+
 // The name of the uninterpreted option.  Each string represents a segment in
 // a dot-separated name.  is_extension is true iff a segment represents an
 // extension (denoted with parentheses in options specs in .proto files).
@@ -3244,7 +3993,7 @@ type UninterpretedOption_NamePart struct {
 func (x *UninterpretedOption_NamePart) Reset() {
 	*x = UninterpretedOption_NamePart{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -3257,7 +4006,7 @@ func (x *UninterpretedOption_NamePart) String() string {
 func (*UninterpretedOption_NamePart) ProtoMessage() {}
 
 func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -3287,6 +4036,65 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool {
 	return false
 }
 
+// A map from every known edition with a unique set of defaults to its
+// defaults. Not all editions may be contained here.  For a given edition,
+// the defaults at the closest matching edition ordered at or before it should
+// be used.  This field must be in strict ascending order by edition.
+type FeatureSetDefaults_FeatureSetEditionDefault struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Edition  *Edition    `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
+	Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"`
+}
+
+func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() {
+	*x = FeatureSetDefaults_FeatureSetEditionDefault{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {}
+
+func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FeatureSetDefaults_FeatureSetEditionDefault.ProtoReflect.Descriptor instead.
+func (*FeatureSetDefaults_FeatureSetEditionDefault) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0}
+}
+
+func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetEdition() Edition {
+	if x != nil && x.Edition != nil {
+		return *x.Edition
+	}
+	return Edition_EDITION_UNKNOWN
+}
+
+func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFeatures() *FeatureSet {
+	if x != nil {
+		return x.Features
+	}
+	return nil
+}
+
 type SourceCodeInfo_Location struct {
 	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
@@ -3388,7 +4196,7 @@ type SourceCodeInfo_Location struct {
 func (x *SourceCodeInfo_Location) Reset() {
 	*x = SourceCodeInfo_Location{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -3401,7 +4209,7 @@ func (x *SourceCodeInfo_Location) String() string {
 func (*SourceCodeInfo_Location) ProtoMessage() {}
 
 func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -3414,7 +4222,7 @@ func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use SourceCodeInfo_Location.ProtoReflect.Descriptor instead.
 func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) {
-	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0}
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{21, 0}
 }
 
 func (x *SourceCodeInfo_Location) GetPath() []int32 {
@@ -3475,7 +4283,7 @@ type GeneratedCodeInfo_Annotation struct {
 func (x *GeneratedCodeInfo_Annotation) Reset() {
 	*x = GeneratedCodeInfo_Annotation{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
+		mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -3488,7 +4296,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string {
 func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
 
 func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -3501,7 +4309,7 @@ func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use GeneratedCodeInfo_Annotation.ProtoReflect.Descriptor instead.
 func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) {
-	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{20, 0}
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{22, 0}
 }
 
 func (x *GeneratedCodeInfo_Annotation) GetPath() []int32 {
@@ -3550,7 +4358,7 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{
 	0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
 	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73,
 	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69,
-	0x6c, 0x65, 0x22, 0xfe, 0x04, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
+	0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
 	0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
 	0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18,
 	0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
@@ -3588,527 +4396,687 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{
 	0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
 	0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
 	0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28,
-	0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x64, 0x69,
-	0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74,
-	0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
-	0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66,
-	0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
-	0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
-	0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65,
-	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
-	0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
-	0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a,
-	0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03,
-	0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
-	0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65,
-	0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20,
-	0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
-	0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54,
-	0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
-	0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
-	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45,
-	0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65,
-	0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a,
-	0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28,
-	0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
-	0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44,
-	0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07,
-	0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55,
-	0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65,
-	0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
-	0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
-	0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
-	0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
-	0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65,
-	0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78,
-	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05,
-	0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61,
-	0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
-	0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
-	0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
-	0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76,
-	0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74,
-	0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a,
-	0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22,
-	0xad, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e,
-	0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69,
-	0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74,
-	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13,
-	0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69,
-	0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e,
-	0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
-	0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01,
-	0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x68,
-	0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03,
-	0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
-	0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, 0x72,
-	0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, 0x0a,
-	0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69,
-	0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xb3, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63,
-	0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62,
-	0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
-	0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20,
-	0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a,
-	0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70,
-	0x65, 0x12, 0x23, 0x0a, 0x0b, 0x69, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64,
-	0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x52, 0x65,
-	0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76,
-	0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76,
-	0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06,
-	0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x22, 0x34,
-	0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74,
-	0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49,
-	0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49,
-	0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22,
-	0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
-	0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
-	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06,
-	0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75,
-	0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20,
-	0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72,
-	0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c,
-	0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18,
-	0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+	0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69,
+	0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69,
+	0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06,
+	0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
+	0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63,
+	0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65,
+	0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18,
+	0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
 	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73,
-	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70,
-	0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f,
-	0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65,
-	0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65,
-	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65,
-	0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75,
-	0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
-	0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69,
-	0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f,
-	0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e,
-	0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e,
-	0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08,
-	0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18,
-	0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f,
-	0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12,
-	0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12,
-	0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12,
-	0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04,
-	0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05,
-	0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34,
-	0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44,
-	0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f,
-	0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49,
-	0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f,
-	0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53,
-	0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42,
-	0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55,
-	0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53,
-	0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50,
-	0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b,
-	0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a,
-	0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43,
-	0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c,
-	0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c,
-	0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12,
-	0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45,
-	0x44, 0x10, 0x03, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63,
-	0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e,
-	0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
-	0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
-	0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
-	0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75,
-	0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
-	0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
-	0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
-	0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44,
-	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05,
-	0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
-	0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a,
-	0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18,
-	0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63,
-	0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d,
-	0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72,
-	0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d,
-	0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20,
-	0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d,
-	0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
-	0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18,
+	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78,
+	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65,
+	0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
+	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a,
+	0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e,
+	0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+	0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
+	0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a,
+	0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65,
+	0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+	0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+	0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66,
+	0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e,
+	0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
+	0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a,
+	0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
+	0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65,
+	0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b,
+	0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+	0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
+	0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65,
+	0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
+	0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
+	0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
+	0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+	0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18,
 	0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03,
-	0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83,
-	0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63,
-	0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e,
-	0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
-	0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
-	0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56,
-	0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+	0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40,
+	0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
+	0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65,
+	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67,
+	0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
+	0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02,
+	0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xc7, 0x04, 0x0a, 0x15, 0x45, 0x78,
+	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
+	0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03,
+	0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
+	0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
+	0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a,
+	0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03,
+	0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61,
+	0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61,
+	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63,
+	0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74,
+	0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
+	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
+	0x73, 0x12, 0x68, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+	0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+	0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+	0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74,
+	0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x52, 0x0c, 0x76,
+	0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94, 0x01, 0x0a, 0x0b,
+	0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6e,
+	0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d,
+	0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, 0x61, 0x6d, 0x65,
+	0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+	0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
+	0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
+	0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01,
+	0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a, 0x04, 0x08, 0x04,
+	0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+	0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, 0x43, 0x4c, 0x41,
+	0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45,
+	0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80,
+	0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73,
+	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04,
+	0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+	0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05,
+	0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65,
+	0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44,
+	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4c,
+	0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, 0x0a, 0x04, 0x74,
+	0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c,
+	0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+	0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74,
+	0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+	0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65,
+	0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65,
+	0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x66,
+	0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x6e, 0x65,
+	0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a,
+	0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73,
+	0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a,
+	0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, 0x04, 0x54, 0x79,
+	0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, 0x55, 0x42, 0x4c,
+	0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x4c, 0x4f, 0x41,
+	0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x36,
+	0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54,
+	0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x54,
+	0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58,
+	0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46,
+	0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45,
+	0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f,
+	0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45,
+	0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45,
+	0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59,
+	0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59,
+	0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x54,
+	0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59,
+	0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, 0x12, 0x11, 0x0a,
+	0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x10,
+	0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10,
+	0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, 0x36, 0x34,
+	0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x0e, 0x4c,
+	0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12,
+	0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54, 0x45,
+	0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x51,
+	0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, 0x65, 0x6f, 0x66,
 	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
 	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
-	0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20,
-	0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63,
-	0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74,
-	0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03,
-	0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89,
-	0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
-	0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
-	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a,
-	0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f,
-	0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07,
-	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e,
+	0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02,
+	0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xe3, 0x02, 0x0a,
+	0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
+	0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61,
+	0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
+	0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x6f, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75,
+	0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61,
+	0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d,
+	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+	0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67,
+	0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65,
+	0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d,
+	0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
+	0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73,
+	0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74,
+	0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74,
+	0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65,
+	0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65,
+	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+	0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20,
+	0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
+	0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
+	0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, 0x53, 0x65, 0x72,
+	0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
+	0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f,
+	0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52,
+	0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
+	0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73,
+	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04,
+	0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+	0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12,
+	0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, 0x79, 0x70, 0x65,
+	0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28,
+	0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c,
+	0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05,
+	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69,
+	0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10,
+	0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67,
+	0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73,
+	0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0xca,
+	0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21,
+	0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67,
+	0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f,
+	0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e,
+	0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74,
+	0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08,
+	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c,
+	0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61,
+	0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61,
+	0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28,
+	0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72,
+	0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68,
+	0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f,
+	0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08,
+	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72,
+	0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c,
+	0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01,
+	0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53,
+	0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f,
+	0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18,
+	0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65,
+	0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73,
+	0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
+	0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53,
+	0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f,
+	0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
+	0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a,
+	0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+	0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63,
+	0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a,
+	0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69,
+	0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70,
+	0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+	0x73, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12,
+	0x70, 0x68, 0x70, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+	0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+	0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
+	0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f,
+	0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20,
+	0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61,
+	0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a,
+	0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50,
+	0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f,
+	0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65,
+	0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78,
+	0x18, 0x27, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65,
+	0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73,
+	0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70,
+	0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a,
+	0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
+	0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61,
+	0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x14, 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e,
+	0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79,
+	0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+	0x72, 0x75, 0x62, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66,
+	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e,
 	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
-	0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08,
-	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53,
-	0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76,
-	0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01,
-	0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65,
-	0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x22, 0x91, 0x09, 0x0a, 0x0b, 0x46,
-	0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61,
-	0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a,
-	0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73,
-	0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76,
-	0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12,
-	0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65,
-	0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
-	0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c,
-	0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67,
-	0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61,
-	0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18,
-	0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45,
-	0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16,
-	0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63,
-	0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
-	0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43,
-	0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69,
-	0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74,
-	0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44,
-	0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a,
-	0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28,
-	0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13,
-	0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69,
-	0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
-	0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69,
-	0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65,
-	0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01,
-	0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47,
-	0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35,
-	0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72,
-	0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
-	0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72,
-	0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x14, 0x70, 0x68, 0x70, 0x5f, 0x67, 0x65, 0x6e,
-	0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x2a, 0x20,
-	0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x12, 0x70, 0x68, 0x70, 0x47,
-	0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25,
-	0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01,
-	0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65,
-	0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62,
-	0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a,
-	0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41,
-	0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c,
-	0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69,
-	0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
-	0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68,
-	0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c,
-	0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01,
-	0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12,
-	0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65,
-	0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c,
-	0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70,
-	0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34,
-	0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e,
-	0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14,
-	0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73,
-	0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63,
-	0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79,
-	0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74,
-	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18,
-	0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
-	0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e,
-	0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64,
-	0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09,
-	0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c,
-	0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08,
-	0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xbb,
-	0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
-	0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74,
-	0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01,
-	0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61,
-	0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12,
-	0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64,
-	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
-	0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52,
-	0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72,
-	0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a,
-	0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28,
-	0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
-	0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72,
-	0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72,
-	0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f,
-	0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c,
-	0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28,
-	0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
-	0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64,
-	0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69,
-	0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74,
-	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13,
-	0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04,
-	0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07,
-	0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x85, 0x09, 0x0a,
-	0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a,
-	0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
-	0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70,
-	0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65,
-	0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
-	0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79,
-	0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09,
-	0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70,
-	0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a,
-	0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f,
-	0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18,
-	0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e,
-	0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a,
-	0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
-	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
-	0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28,
-	0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28,
-	0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10,
-	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62,
-	0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65,
-	0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
-	0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65,
-	0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18,
-	0x12, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65,
-	0x74, 0x54, 0x79, 0x70, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65,
-	0x74, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03,
-	0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
-	0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79,
-	0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75,
-	0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69,
-	0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
-	0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a,
-	0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f,
-	0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50,
-	0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65,
-	0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12,
-	0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d,
-	0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a,
-	0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e,
-	0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e,
-	0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e,
-	0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14,
-	0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52,
-	0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54,
-	0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52,
-	0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
-	0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
-	0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47,
-	0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f,
-	0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52,
-	0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45,
-	0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
-	0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52,
-	0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05,
-	0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54,
-	0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59,
-	0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
-	0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54,
-	0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f,
-	0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04,
-	0x08, 0x04, 0x10, 0x05, 0x22, 0x73, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
+	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74,
+	0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
 	0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20,
 	0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
 	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
 	0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74,
-	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09,
-	0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x98, 0x02, 0x0a, 0x0b, 0x45, 0x6e,
-	0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c,
-	0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a,
-	0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65,
+	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a,
+	0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09,
+	0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44,
+	0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45,
+	0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10,
+	0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03, 0x0a, 0x0e,
+	0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c,
+	0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69,
+	0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a,
+	0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53,
+	0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f,
+	0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63,
+	0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f,
+	0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+	0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65,
 	0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05,
 	0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
-	0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f,
-	0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c,
-	0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28,
-	0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
-	0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64,
-	0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69,
-	0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74,
-	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13,
-	0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04,
-	0x08, 0x05, 0x10, 0x06, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c,
-	0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70,
-	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
-	0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
-	0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
-	0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32,
-	0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
-	0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10,
-	0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9c, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
-	0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72,
-	0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
-	0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
+	0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07,
+	0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56,
+	0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67,
+	0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63,
+	0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02,
+	0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65,
+	0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e,
+	0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
+	0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75,
+	0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12,
 	0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
 	0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24,
 	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
 	0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70,
 	0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
 	0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80,
-	0x80, 0x80, 0x80, 0x02, 0x22, 0xe0, 0x02, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
-	0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73,
-	0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a,
-	0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76,
-	0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f,
-	0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74,
-	0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50,
-	0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10,
-	0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c,
-	0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
-	0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32,
-	0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
-	0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64,
-	0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17,
-	0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e,
-	0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49,
-	0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a,
-	0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8,
-	0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e,
-	0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12,
-	0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e,
+	0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06,
+	0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09,
+	0x10, 0x0a, 0x22, 0xad, 0x0a, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52,
+	0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47,
+	0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53,
+	0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52,
+	0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18,
+	0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61,
+	0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64,
+	0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
+	0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61,
+	0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+	0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
+	0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61,
+	0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04,
+	0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65,
+	0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73,
+	0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b,
+	0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28,
+	0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e,
+	0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74,
+	0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
+	0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x74, 0x61,
+	0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+	0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32,
+	0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45,
+	0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65,
+	0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37,
+	0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b,
+	0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+	0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66,
+	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74,
+	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+	0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
+	0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e,
+	0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61,
+	0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03,
+	0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07,
+	0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x2f, 0x0a,
+	0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47,
+	0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c,
+	0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02, 0x22, 0x35,
+	0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e,
+	0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54,
+	0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d,
+	0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+	0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45,
+	0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
+	0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e,
+	0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54,
+	0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a,
+	0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70,
+	0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45,
+	0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41,
+	0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01,
+	0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
+	0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10,
+	0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45,
+	0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41,
+	0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10,
+	0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45,
+	0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47,
+	0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a,
+	0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e,
+	0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41,
+	0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43,
+	0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59,
+	0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07,
+	0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12,
+	0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
+	0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14,
+	0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e,
+	0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
+	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80,
+	0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69,
+	0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+	0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
+	0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70,
+	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a,
+	0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69,
+	0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64,
+	0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a,
+	0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74,
+	0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x07, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74,
+	0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e,
+	0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e,
+	0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+	0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a,
+	0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61,
+	0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65,
+	0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05,
+	0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
+	0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74,
+	0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65,
+	0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
+	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65,
+	0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
+	0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
+	0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74,
+	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09,
+	0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65,
+	0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08,
+	0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61,
+	0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
+	0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
+	0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14,
+	0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e,
+	0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
+	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80,
+	0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65,
+	0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a,
+	0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64,
+	0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18,
+	0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63,
+	0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45,
+	0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65,
+	0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a,
+	0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32,
+	0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65,
+	0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
+	0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7,
+	0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
+	0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69,
+	0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+	0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c,
+	0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45,
+	0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a,
+	0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53,
+	0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54,
+	0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03,
+	0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
+	0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61,
+	0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e,
+	0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61,
+	0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f,
+	0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52,
+	0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75,
+	0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e,
+	0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e,
+	0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12,
+	0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+	0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c,
+	0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c,
+	0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67,
+	0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61,
+	0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e,
+	0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a,
+	0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61,
+	0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e,
+	0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78,
+	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69,
+	0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfc, 0x09, 0x0a, 0x0a, 0x46,
+	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x8b, 0x01, 0x0a, 0x0e, 0x66, 0x69,
+	0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e,
+	0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x39, 0x88,
+	0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50,
+	0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50,
+	0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50,
+	0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x50,
+	0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x66, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f,
+	0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
+	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65,
+	0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b, 0x12, 0x06,
+	0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4f, 0x50,
+	0x45, 0x4e, 0x18, 0xe7, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12,
+	0x92, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65,
+	0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28,
+	0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52,
+	0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f,
+	0x64, 0x69, 0x6e, 0x67, 0x42, 0x27, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2,
+	0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2,
+	0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0x52, 0x15, 0x72,
+	0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f,
+	0x64, 0x69, 0x6e, 0x67, 0x12, 0x78, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c,
+	0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e,
 	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61,
-	0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72,
-	0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64,
-	0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a,
-	0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61,
-	0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74,
-	0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e,
-	0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75,
-	0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76,
-	0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75,
-	0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52,
-	0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c,
-	0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01,
-	0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12,
-	0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c,
-	0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67,
-	0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65,
-	0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72,
-	0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72,
-	0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
-	0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e,
-	0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43,
-	0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74,
-	0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72,
-	0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74,
-	0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01,
-	0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61,
-	0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61,
-	0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05,
-	0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65,
-	0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03,
-	0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d,
-	0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e,
-	0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e,
-	0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65,
-	0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18,
-	0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65,
-	0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0,
-	0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65,
-	0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
-	0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72,
-	0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e,
-	0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
-	0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
-	0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05,
-	0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f,
-	0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62,
-	0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69,
-	0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03,
-	0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18,
-	0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65,
-	0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
-	0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73,
-	0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e,
-	0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a,
-	0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10,
-	0x02, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
-	0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64,
-	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02,
-	0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f,
-	0x6e,
+	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56,
+	0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01,
+	0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0xe6, 0x07,
+	0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0x52, 0x0e,
+	0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x78,
+	0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69,
+	0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75,
+	0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63,
+	0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x20, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01,
+	0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46,
+	0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
+	0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7c, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e,
+	0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46,
+	0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x33, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06,
+	0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42,
+	0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a,
+	0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e,
+	0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50,
+	0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, 0x4c, 0x44,
+	0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57,
+	0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10,
+	0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x02, 0x12,
+	0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52,
+	0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65,
+	0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e,
+	0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x10,
+	0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, 0x56, 0x0a,
+	0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e,
+	0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, 0x41, 0x54,
+	0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e,
+	0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x50,
+	0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e,
+	0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c,
+	0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, 0x38, 0x5f,
+	0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f,
+	0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x01, 0x12, 0x0a,
+	0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x10, 0x02, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65,
+	0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a,
+	0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e,
+	0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c,
+	0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01,
+	0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22,
+	0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a,
+	0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b,
+	0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10,
+	0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54,
+	0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9,
+	0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90,
+	0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xfe, 0x02, 0x0a, 0x12, 0x46, 0x65,
+	0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73,
+	0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03,
+	0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44,
+	0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
+	0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
+	0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69,
+	0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20,
+	0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d,
+	0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a,
+	0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+	0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+	0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+	0x1a, 0x87, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45,
+	0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a,
+	0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f,
+	0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74,
+	0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a, 0x0e, 0x53,
+	0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a,
+	0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+	0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+	0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f,
+	0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74,
+	0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+	0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02,
+	0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e,
+	0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e,
+	0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d,
+	0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64,
+	0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74,
+	0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73,
+	0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67,
+	0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64,
+	0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d,
+	0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61,
+	0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d,
+	0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+	0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e,
+	0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e,
+	0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61,
+	0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e,
+	0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68,
+	0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68,
+	0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c,
+	0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05,
+	0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04,
+	0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d,
+	0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65,
+	0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e,
+	0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e,
+	0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a,
+	0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e,
+	0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05,
+	0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xea, 0x01, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74,
+	0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55,
+	0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54,
+	0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a,
+	0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10,
+	0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30,
+	0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e,
+	0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17,
+	0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54,
+	0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49,
+	0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e,
+	0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f,
+	0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c,
+	0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e,
+	0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59,
+	0x10, 0x9f, 0x8d, 0x06, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73,
+	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a,
+	0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f,
+	0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65,
+	0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01,
+	0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63,
+	0x74, 0x69, 0x6f, 0x6e,
 }
 
 var (
@@ -4123,103 +5091,136 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte {
 	return file_google_protobuf_descriptor_proto_rawDescData
 }
 
-var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 10)
-var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 28)
+var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17)
+var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 32)
 var file_google_protobuf_descriptor_proto_goTypes = []interface{}{
-	(ExtensionRangeOptions_VerificationState)(0),  // 0: google.protobuf.ExtensionRangeOptions.VerificationState
-	(FieldDescriptorProto_Type)(0),                // 1: google.protobuf.FieldDescriptorProto.Type
-	(FieldDescriptorProto_Label)(0),               // 2: google.protobuf.FieldDescriptorProto.Label
-	(FileOptions_OptimizeMode)(0),                 // 3: google.protobuf.FileOptions.OptimizeMode
-	(FieldOptions_CType)(0),                       // 4: google.protobuf.FieldOptions.CType
-	(FieldOptions_JSType)(0),                      // 5: google.protobuf.FieldOptions.JSType
-	(FieldOptions_OptionRetention)(0),             // 6: google.protobuf.FieldOptions.OptionRetention
-	(FieldOptions_OptionTargetType)(0),            // 7: google.protobuf.FieldOptions.OptionTargetType
-	(MethodOptions_IdempotencyLevel)(0),           // 8: google.protobuf.MethodOptions.IdempotencyLevel
-	(GeneratedCodeInfo_Annotation_Semantic)(0),    // 9: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
-	(*FileDescriptorSet)(nil),                     // 10: google.protobuf.FileDescriptorSet
-	(*FileDescriptorProto)(nil),                   // 11: google.protobuf.FileDescriptorProto
-	(*DescriptorProto)(nil),                       // 12: google.protobuf.DescriptorProto
-	(*ExtensionRangeOptions)(nil),                 // 13: google.protobuf.ExtensionRangeOptions
-	(*FieldDescriptorProto)(nil),                  // 14: google.protobuf.FieldDescriptorProto
-	(*OneofDescriptorProto)(nil),                  // 15: google.protobuf.OneofDescriptorProto
-	(*EnumDescriptorProto)(nil),                   // 16: google.protobuf.EnumDescriptorProto
-	(*EnumValueDescriptorProto)(nil),              // 17: google.protobuf.EnumValueDescriptorProto
-	(*ServiceDescriptorProto)(nil),                // 18: google.protobuf.ServiceDescriptorProto
-	(*MethodDescriptorProto)(nil),                 // 19: google.protobuf.MethodDescriptorProto
-	(*FileOptions)(nil),                           // 20: google.protobuf.FileOptions
-	(*MessageOptions)(nil),                        // 21: google.protobuf.MessageOptions
-	(*FieldOptions)(nil),                          // 22: google.protobuf.FieldOptions
-	(*OneofOptions)(nil),                          // 23: google.protobuf.OneofOptions
-	(*EnumOptions)(nil),                           // 24: google.protobuf.EnumOptions
-	(*EnumValueOptions)(nil),                      // 25: google.protobuf.EnumValueOptions
-	(*ServiceOptions)(nil),                        // 26: google.protobuf.ServiceOptions
-	(*MethodOptions)(nil),                         // 27: google.protobuf.MethodOptions
-	(*UninterpretedOption)(nil),                   // 28: google.protobuf.UninterpretedOption
-	(*SourceCodeInfo)(nil),                        // 29: google.protobuf.SourceCodeInfo
-	(*GeneratedCodeInfo)(nil),                     // 30: google.protobuf.GeneratedCodeInfo
-	(*DescriptorProto_ExtensionRange)(nil),        // 31: google.protobuf.DescriptorProto.ExtensionRange
-	(*DescriptorProto_ReservedRange)(nil),         // 32: google.protobuf.DescriptorProto.ReservedRange
-	(*ExtensionRangeOptions_Declaration)(nil),     // 33: google.protobuf.ExtensionRangeOptions.Declaration
-	(*EnumDescriptorProto_EnumReservedRange)(nil), // 34: google.protobuf.EnumDescriptorProto.EnumReservedRange
-	(*UninterpretedOption_NamePart)(nil),          // 35: google.protobuf.UninterpretedOption.NamePart
-	(*SourceCodeInfo_Location)(nil),               // 36: google.protobuf.SourceCodeInfo.Location
-	(*GeneratedCodeInfo_Annotation)(nil),          // 37: google.protobuf.GeneratedCodeInfo.Annotation
+	(Edition)(0), // 0: google.protobuf.Edition
+	(ExtensionRangeOptions_VerificationState)(0),        // 1: google.protobuf.ExtensionRangeOptions.VerificationState
+	(FieldDescriptorProto_Type)(0),                      // 2: google.protobuf.FieldDescriptorProto.Type
+	(FieldDescriptorProto_Label)(0),                     // 3: google.protobuf.FieldDescriptorProto.Label
+	(FileOptions_OptimizeMode)(0),                       // 4: google.protobuf.FileOptions.OptimizeMode
+	(FieldOptions_CType)(0),                             // 5: google.protobuf.FieldOptions.CType
+	(FieldOptions_JSType)(0),                            // 6: google.protobuf.FieldOptions.JSType
+	(FieldOptions_OptionRetention)(0),                   // 7: google.protobuf.FieldOptions.OptionRetention
+	(FieldOptions_OptionTargetType)(0),                  // 8: google.protobuf.FieldOptions.OptionTargetType
+	(MethodOptions_IdempotencyLevel)(0),                 // 9: google.protobuf.MethodOptions.IdempotencyLevel
+	(FeatureSet_FieldPresence)(0),                       // 10: google.protobuf.FeatureSet.FieldPresence
+	(FeatureSet_EnumType)(0),                            // 11: google.protobuf.FeatureSet.EnumType
+	(FeatureSet_RepeatedFieldEncoding)(0),               // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding
+	(FeatureSet_Utf8Validation)(0),                      // 13: google.protobuf.FeatureSet.Utf8Validation
+	(FeatureSet_MessageEncoding)(0),                     // 14: google.protobuf.FeatureSet.MessageEncoding
+	(FeatureSet_JsonFormat)(0),                          // 15: google.protobuf.FeatureSet.JsonFormat
+	(GeneratedCodeInfo_Annotation_Semantic)(0),          // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+	(*FileDescriptorSet)(nil),                           // 17: google.protobuf.FileDescriptorSet
+	(*FileDescriptorProto)(nil),                         // 18: google.protobuf.FileDescriptorProto
+	(*DescriptorProto)(nil),                             // 19: google.protobuf.DescriptorProto
+	(*ExtensionRangeOptions)(nil),                       // 20: google.protobuf.ExtensionRangeOptions
+	(*FieldDescriptorProto)(nil),                        // 21: google.protobuf.FieldDescriptorProto
+	(*OneofDescriptorProto)(nil),                        // 22: google.protobuf.OneofDescriptorProto
+	(*EnumDescriptorProto)(nil),                         // 23: google.protobuf.EnumDescriptorProto
+	(*EnumValueDescriptorProto)(nil),                    // 24: google.protobuf.EnumValueDescriptorProto
+	(*ServiceDescriptorProto)(nil),                      // 25: google.protobuf.ServiceDescriptorProto
+	(*MethodDescriptorProto)(nil),                       // 26: google.protobuf.MethodDescriptorProto
+	(*FileOptions)(nil),                                 // 27: google.protobuf.FileOptions
+	(*MessageOptions)(nil),                              // 28: google.protobuf.MessageOptions
+	(*FieldOptions)(nil),                                // 29: google.protobuf.FieldOptions
+	(*OneofOptions)(nil),                                // 30: google.protobuf.OneofOptions
+	(*EnumOptions)(nil),                                 // 31: google.protobuf.EnumOptions
+	(*EnumValueOptions)(nil),                            // 32: google.protobuf.EnumValueOptions
+	(*ServiceOptions)(nil),                              // 33: google.protobuf.ServiceOptions
+	(*MethodOptions)(nil),                               // 34: google.protobuf.MethodOptions
+	(*UninterpretedOption)(nil),                         // 35: google.protobuf.UninterpretedOption
+	(*FeatureSet)(nil),                                  // 36: google.protobuf.FeatureSet
+	(*FeatureSetDefaults)(nil),                          // 37: google.protobuf.FeatureSetDefaults
+	(*SourceCodeInfo)(nil),                              // 38: google.protobuf.SourceCodeInfo
+	(*GeneratedCodeInfo)(nil),                           // 39: google.protobuf.GeneratedCodeInfo
+	(*DescriptorProto_ExtensionRange)(nil),              // 40: google.protobuf.DescriptorProto.ExtensionRange
+	(*DescriptorProto_ReservedRange)(nil),               // 41: google.protobuf.DescriptorProto.ReservedRange
+	(*ExtensionRangeOptions_Declaration)(nil),           // 42: google.protobuf.ExtensionRangeOptions.Declaration
+	(*EnumDescriptorProto_EnumReservedRange)(nil),       // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange
+	(*FieldOptions_EditionDefault)(nil),                 // 44: google.protobuf.FieldOptions.EditionDefault
+	(*UninterpretedOption_NamePart)(nil),                // 45: google.protobuf.UninterpretedOption.NamePart
+	(*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 46: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+	(*SourceCodeInfo_Location)(nil),                     // 47: google.protobuf.SourceCodeInfo.Location
+	(*GeneratedCodeInfo_Annotation)(nil),                // 48: google.protobuf.GeneratedCodeInfo.Annotation
 }
 var file_google_protobuf_descriptor_proto_depIdxs = []int32{
-	11, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
-	12, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
-	16, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
-	18, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
-	14, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
-	20, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
-	29, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
-	14, // 7: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
-	14, // 8: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
-	12, // 9: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
-	16, // 10: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
-	31, // 11: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
-	15, // 12: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
-	21, // 13: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
-	32, // 14: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
-	28, // 15: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	33, // 16: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
-	0,  // 17: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
-	2,  // 18: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
-	1,  // 19: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
-	22, // 20: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
-	23, // 21: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
-	17, // 22: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
-	24, // 23: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
-	34, // 24: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
-	25, // 25: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
-	19, // 26: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
-	26, // 27: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
-	27, // 28: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
-	3,  // 29: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
-	28, // 30: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	28, // 31: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	4,  // 32: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
-	5,  // 33: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
-	6,  // 34: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
-	7,  // 35: google.protobuf.FieldOptions.target:type_name -> google.protobuf.FieldOptions.OptionTargetType
-	7,  // 36: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
-	28, // 37: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	28, // 38: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	28, // 39: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	28, // 40: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	28, // 41: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	8,  // 42: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
-	28, // 43: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	35, // 44: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
-	36, // 45: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
-	37, // 46: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
-	13, // 47: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
-	9,  // 48: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
-	49, // [49:49] is the sub-list for method output_type
-	49, // [49:49] is the sub-list for method input_type
-	49, // [49:49] is the sub-list for extension type_name
-	49, // [49:49] is the sub-list for extension extendee
-	0,  // [0:49] is the sub-list for field type_name
+	18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
+	19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
+	23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+	25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
+	21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+	27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
+	38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
+	0,  // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition
+	21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
+	21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+	19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
+	23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+	40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
+	22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
+	28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
+	41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
+	35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
+	36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet
+	1,  // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
+	3,  // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
+	2,  // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
+	29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
+	30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
+	24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
+	31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
+	43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
+	32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
+	26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
+	33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
+	34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
+	4,  // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
+	36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet
+	35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet
+	35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	5,  // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
+	6,  // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
+	7,  // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
+	8,  // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
+	44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
+	36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
+	35, // 42: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	36, // 43: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
+	35, // 44: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	36, // 45: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
+	35, // 46: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	36, // 47: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
+	35, // 48: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	36, // 49: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
+	35, // 50: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	9,  // 51: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
+	36, // 52: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
+	35, // 53: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	45, // 54: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
+	10, // 55: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
+	11, // 56: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
+	12, // 57: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
+	13, // 58: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
+	14, // 59: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
+	15, // 60: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
+	46, // 61: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+	0,  // 62: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
+	0,  // 63: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
+	47, // 64: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
+	48, // 65: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
+	20, // 66: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
+	0,  // 67: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
+	0,  // 68: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
+	36, // 69: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features:type_name -> google.protobuf.FeatureSet
+	16, // 70: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+	71, // [71:71] is the sub-list for method output_type
+	71, // [71:71] is the sub-list for method input_type
+	71, // [71:71] is the sub-list for extension type_name
+	71, // [71:71] is the sub-list for extension extendee
+	0,  // [0:71] is the sub-list for field type_name
 }
 
 func init() { file_google_protobuf_descriptor_proto_init() }
@@ -4475,19 +5476,21 @@ func file_google_protobuf_descriptor_proto_init() {
 			}
 		}
 		file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*SourceCodeInfo); i {
+			switch v := v.(*FeatureSet); i {
 			case 0:
 				return &v.state
 			case 1:
 				return &v.sizeCache
 			case 2:
 				return &v.unknownFields
+			case 3:
+				return &v.extensionFields
 			default:
 				return nil
 			}
 		}
 		file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*GeneratedCodeInfo); i {
+			switch v := v.(*FeatureSetDefaults); i {
 			case 0:
 				return &v.state
 			case 1:
@@ -4499,7 +5502,7 @@ func file_google_protobuf_descriptor_proto_init() {
 			}
 		}
 		file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*DescriptorProto_ExtensionRange); i {
+			switch v := v.(*SourceCodeInfo); i {
 			case 0:
 				return &v.state
 			case 1:
@@ -4511,7 +5514,7 @@ func file_google_protobuf_descriptor_proto_init() {
 			}
 		}
 		file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*DescriptorProto_ReservedRange); i {
+			switch v := v.(*GeneratedCodeInfo); i {
 			case 0:
 				return &v.state
 			case 1:
@@ -4523,7 +5526,7 @@ func file_google_protobuf_descriptor_proto_init() {
 			}
 		}
 		file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*ExtensionRangeOptions_Declaration); i {
+			switch v := v.(*DescriptorProto_ExtensionRange); i {
 			case 0:
 				return &v.state
 			case 1:
@@ -4535,7 +5538,7 @@ func file_google_protobuf_descriptor_proto_init() {
 			}
 		}
 		file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*EnumDescriptorProto_EnumReservedRange); i {
+			switch v := v.(*DescriptorProto_ReservedRange); i {
 			case 0:
 				return &v.state
 			case 1:
@@ -4547,7 +5550,7 @@ func file_google_protobuf_descriptor_proto_init() {
 			}
 		}
 		file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*UninterpretedOption_NamePart); i {
+			switch v := v.(*ExtensionRangeOptions_Declaration); i {
 			case 0:
 				return &v.state
 			case 1:
@@ -4559,7 +5562,7 @@ func file_google_protobuf_descriptor_proto_init() {
 			}
 		}
 		file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
-			switch v := v.(*SourceCodeInfo_Location); i {
+			switch v := v.(*EnumDescriptorProto_EnumReservedRange); i {
 			case 0:
 				return &v.state
 			case 1:
@@ -4571,6 +5574,54 @@ func file_google_protobuf_descriptor_proto_init() {
 			}
 		}
 		file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*FieldOptions_EditionDefault); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*UninterpretedOption_NamePart); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*SourceCodeInfo_Location); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
 			switch v := v.(*GeneratedCodeInfo_Annotation); i {
 			case 0:
 				return &v.state
@@ -4588,8 +5639,8 @@ func file_google_protobuf_descriptor_proto_init() {
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc,
-			NumEnums:      10,
-			NumMessages:   28,
+			NumEnums:      17,
+			NumMessages:   32,
 			NumExtensions: 0,
 			NumServices:   0,
 		},
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index 580b232f47..9de51be540 100644
--- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -237,7 +237,8 @@ type Any struct {
 	//
 	// Note: this functionality is not currently available in the official
 	// protobuf release, and it is not used for type URLs beginning with
-	// type.googleapis.com.
+	// type.googleapis.com. As of May 2023, there are no widely used type server
+	// implementations and no plans to implement one.
 	//
 	// Schemes other than `http`, `https` (or the empty scheme) might be
 	// used with implementation specific semantics.
diff --git a/vendor/modules.txt b/vendor/modules.txt
index ab4956dfd8..112ade66d8 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,4 +1,4 @@
-# cloud.google.com/go v0.111.0
+# cloud.google.com/go v0.112.0
 ## explicit; go 1.19
 cloud.google.com/go/internal
 cloud.google.com/go/internal/optional
@@ -14,7 +14,7 @@ cloud.google.com/go/compute/metadata
 ## explicit; go 1.19
 cloud.google.com/go/iam
 cloud.google.com/go/iam/apiv1/iampb
-# cloud.google.com/go/storage v1.35.1
+# cloud.google.com/go/storage v1.36.0
 ## explicit; go 1.19
 cloud.google.com/go/storage
 cloud.google.com/go/storage/internal
@@ -51,7 +51,7 @@ github.com/Azure/azure-sdk-for-go/sdk/internal/log
 github.com/Azure/azure-sdk-for-go/sdk/internal/poller
 github.com/Azure/azure-sdk-for-go/sdk/internal/temporal
 github.com/Azure/azure-sdk-for-go/sdk/internal/uuid
-# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0
+# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1
 ## explicit; go 1.18
 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob
 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob
@@ -66,7 +66,7 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared
 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob
 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas
 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service
-# github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0
+# github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1
 ## explicit; go 1.18
 github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache
 github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential
@@ -113,7 +113,7 @@ github.com/VividCortex/ewma
 # github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9
 ## explicit; go 1.15
 github.com/alecthomas/units
-# github.com/aws/aws-sdk-go v1.49.1
+# github.com/aws/aws-sdk-go v1.49.22
 ## explicit; go 1.19
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/auth/bearer
@@ -157,7 +157,7 @@ github.com/aws/aws-sdk-go/service/sso/ssoiface
 github.com/aws/aws-sdk-go/service/ssooidc
 github.com/aws/aws-sdk-go/service/sts
 github.com/aws/aws-sdk-go/service/sts/stsiface
-# github.com/aws/aws-sdk-go-v2 v1.24.0
+# github.com/aws/aws-sdk-go-v2 v1.24.1
 ## explicit; go 1.19
 github.com/aws/aws-sdk-go-v2/aws
 github.com/aws/aws-sdk-go-v2/aws/arn
@@ -189,10 +189,10 @@ github.com/aws/aws-sdk-go-v2/internal/timeconv
 ## explicit; go 1.19
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi
-# github.com/aws/aws-sdk-go-v2/config v1.26.1
+# github.com/aws/aws-sdk-go-v2/config v1.26.4
 ## explicit; go 1.19
 github.com/aws/aws-sdk-go-v2/config
-# github.com/aws/aws-sdk-go-v2/credentials v1.16.12
+# github.com/aws/aws-sdk-go-v2/credentials v1.16.15
 ## explicit; go 1.19
 github.com/aws/aws-sdk-go-v2/credentials
 github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds
@@ -201,23 +201,23 @@ github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client
 github.com/aws/aws-sdk-go-v2/credentials/processcreds
 github.com/aws/aws-sdk-go-v2/credentials/ssocreds
 github.com/aws/aws-sdk-go-v2/credentials/stscreds
-# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10
+# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11
 ## explicit; go 1.19
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config
-# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.7
+# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.12
 ## explicit; go 1.19
 github.com/aws/aws-sdk-go-v2/feature/s3/manager
-# github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9
+# github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10
 ## explicit; go 1.19
 github.com/aws/aws-sdk-go-v2/internal/configsources
-# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9
+# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10
 ## explicit; go 1.19
 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2
 # github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2
 ## explicit; go 1.19
 github.com/aws/aws-sdk-go-v2/internal/ini
-# github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9
+# github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10
 ## explicit; go 1.19
 github.com/aws/aws-sdk-go-v2/internal/v4a
 github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto
@@ -225,35 +225,35 @@ github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4
 # github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4
 ## explicit; go 1.19
 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding
-# github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.9
+# github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10
 ## explicit; go 1.19
 github.com/aws/aws-sdk-go-v2/service/internal/checksum
-# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9
+# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10
 ## explicit; go 1.19
 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url
-# github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9
+# github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10
 ## explicit; go 1.19
 github.com/aws/aws-sdk-go-v2/service/internal/s3shared
 github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn
 github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config
-# github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5
+# github.com/aws/aws-sdk-go-v2/service/s3 v1.48.0
 ## explicit; go 1.19
 github.com/aws/aws-sdk-go-v2/service/s3
 github.com/aws/aws-sdk-go-v2/service/s3/internal/arn
 github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations
 github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints
 github.com/aws/aws-sdk-go-v2/service/s3/types
-# github.com/aws/aws-sdk-go-v2/service/sso v1.18.5
+# github.com/aws/aws-sdk-go-v2/service/sso v1.18.6
 ## explicit; go 1.19
 github.com/aws/aws-sdk-go-v2/service/sso
 github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints
 github.com/aws/aws-sdk-go-v2/service/sso/types
-# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5
+# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7
 ## explicit; go 1.19
 github.com/aws/aws-sdk-go-v2/service/ssooidc
 github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints
 github.com/aws/aws-sdk-go-v2/service/ssooidc/types
-# github.com/aws/aws-sdk-go-v2/service/sts v1.26.5
+# github.com/aws/aws-sdk-go-v2/service/sts v1.26.7
 ## explicit; go 1.19
 github.com/aws/aws-sdk-go-v2/service/sts
 github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints
@@ -320,7 +320,7 @@ github.com/go-kit/log/level
 # github.com/go-logfmt/logfmt v0.6.0
 ## explicit; go 1.17
 github.com/go-logfmt/logfmt
-# github.com/go-logr/logr v1.3.0
+# github.com/go-logr/logr v1.4.1
 ## explicit; go 1.18
 github.com/go-logr/logr
 github.com/go-logr/logr/funcr
@@ -393,8 +393,8 @@ github.com/googleapis/gax-go/v2/internal
 ## explicit; go 1.17
 github.com/grafana/regexp
 github.com/grafana/regexp/syntax
-# github.com/influxdata/influxdb v1.11.2
-## explicit; go 1.19
+# github.com/influxdata/influxdb v1.11.4
+## explicit; go 1.20
 github.com/influxdata/influxdb/client/v2
 github.com/influxdata/influxdb/models
 github.com/influxdata/influxdb/pkg/escape
@@ -436,9 +436,6 @@ github.com/mattn/go-isatty
 # github.com/mattn/go-runewidth v0.0.15
 ## explicit; go 1.9
 github.com/mattn/go-runewidth
-# github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0
-## explicit; go 1.19
-github.com/matttproud/golang_protobuf_extensions/v2/pbutil
 # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
 ## explicit
 github.com/modern-go/concurrent
@@ -451,7 +448,7 @@ github.com/mwitkow/go-conntrack
 # github.com/oklog/ulid v1.3.1
 ## explicit
 github.com/oklog/ulid
-# github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8
+# github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c
 ## explicit; go 1.14
 github.com/pkg/browser
 # github.com/pkg/errors v0.9.1
@@ -460,17 +457,18 @@ github.com/pkg/errors
 # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
 ## explicit
 github.com/pmezard/go-difflib/difflib
-# github.com/prometheus/client_golang v1.17.0
+# github.com/prometheus/client_golang v1.18.0
 ## explicit; go 1.19
 github.com/prometheus/client_golang/prometheus
 github.com/prometheus/client_golang/prometheus/internal
 github.com/prometheus/client_golang/prometheus/promauto
 github.com/prometheus/client_golang/prometheus/testutil
 github.com/prometheus/client_golang/prometheus/testutil/promlint
+github.com/prometheus/client_golang/prometheus/testutil/promlint/validations
 # github.com/prometheus/client_model v0.5.0
 ## explicit; go 1.19
 github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.45.0
+# github.com/prometheus/common v0.46.0
 ## explicit; go 1.20
 github.com/prometheus/common/config
 github.com/prometheus/common/expfmt
@@ -536,7 +534,7 @@ github.com/russross/blackfriday/v2
 ## explicit; go 1.20
 github.com/stretchr/testify/assert
 github.com/stretchr/testify/require
-# github.com/urfave/cli/v2 v2.26.0
+# github.com/urfave/cli/v2 v2.27.1
 ## explicit; go 1.18
 github.com/urfave/cli/v2
 # github.com/valyala/bytebufferpool v1.0.0
@@ -561,7 +559,7 @@ github.com/valyala/histogram
 # github.com/valyala/quicktemplate v1.7.0
 ## explicit; go 1.11
 github.com/valyala/quicktemplate
-# github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673
+# github.com/xrash/smetrics v0.0.0-20231213231151-1d8dd44e695e
 ## explicit
 github.com/xrash/smetrics
 # go.opencensus.io v0.24.0
@@ -583,7 +581,7 @@ go.opencensus.io/trace
 go.opencensus.io/trace/internal
 go.opencensus.io/trace/propagation
 go.opencensus.io/trace/tracestate
-# go.opentelemetry.io/collector/pdata v1.0.0
+# go.opentelemetry.io/collector/pdata v1.0.1
 ## explicit; go 1.20
 go.opentelemetry.io/collector/pdata/internal
 go.opentelemetry.io/collector/pdata/internal/data
@@ -600,7 +598,7 @@ go.opentelemetry.io/collector/pdata/internal/otlp
 go.opentelemetry.io/collector/pdata/pcommon
 go.opentelemetry.io/collector/pdata/pmetric
 go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp
-# go.opentelemetry.io/collector/semconv v0.91.0
+# go.opentelemetry.io/collector/semconv v0.92.0
 ## explicit; go 1.20
 go.opentelemetry.io/collector/semconv/v1.6.1
 # go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1
@@ -642,7 +640,7 @@ go.uber.org/goleak/internal/stack
 # go.uber.org/multierr v1.11.0
 ## explicit; go 1.19
 go.uber.org/multierr
-# golang.org/x/crypto v0.16.0
+# golang.org/x/crypto v0.18.0
 ## explicit; go 1.18
 golang.org/x/crypto/chacha20
 golang.org/x/crypto/chacha20poly1305
@@ -653,11 +651,11 @@ golang.org/x/crypto/internal/alias
 golang.org/x/crypto/internal/poly1305
 golang.org/x/crypto/pkcs12
 golang.org/x/crypto/pkcs12/internal/rc2
-# golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb
+# golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3
 ## explicit; go 1.20
 golang.org/x/exp/constraints
 golang.org/x/exp/slices
-# golang.org/x/net v0.19.0
+# golang.org/x/net v0.20.0
 ## explicit; go 1.18
 golang.org/x/net/http/httpguts
 golang.org/x/net/http/httpproxy
@@ -668,7 +666,7 @@ golang.org/x/net/internal/socks
 golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.15.0
+# golang.org/x/oauth2 v0.16.0
 ## explicit; go 1.18
 golang.org/x/oauth2
 golang.org/x/oauth2/authhandler
@@ -680,11 +678,11 @@ golang.org/x/oauth2/google/internal/stsexchange
 golang.org/x/oauth2/internal
 golang.org/x/oauth2/jws
 golang.org/x/oauth2/jwt
-# golang.org/x/sync v0.5.0
+# golang.org/x/sync v0.6.0
 ## explicit; go 1.18
 golang.org/x/sync/errgroup
 golang.org/x/sync/semaphore
-# golang.org/x/sys v0.15.0
+# golang.org/x/sys v0.16.0
 ## explicit; go 1.18
 golang.org/x/sys/cpu
 golang.org/x/sys/unix
@@ -700,7 +698,7 @@ golang.org/x/text/unicode/norm
 golang.org/x/time/rate
 # golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028
 ## explicit; go 1.18
-# google.golang.org/api v0.154.0
+# google.golang.org/api v0.156.0
 ## explicit; go 1.19
 google.golang.org/api/googleapi
 google.golang.org/api/googleapi/transport
@@ -730,21 +728,21 @@ google.golang.org/appengine/internal/modules
 google.golang.org/appengine/internal/remote_api
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20231212172506-995d672761c0
+# google.golang.org/genproto v0.0.0-20240108191215-35c7eff3a6b1
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/type/date
 google.golang.org/genproto/googleapis/type/expr
 google.golang.org/genproto/internal
-# google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0
+# google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/api
 google.golang.org/genproto/googleapis/api/annotations
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20231212172506-995d672761c0
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1
 ## explicit; go 1.19
 google.golang.org/genproto/googleapis/rpc/code
 google.golang.org/genproto/googleapis/rpc/errdetails
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.60.0
+# google.golang.org/grpc v1.60.1
 ## explicit; go 1.19
 google.golang.org/grpc
 google.golang.org/grpc/attributes
@@ -810,8 +808,9 @@ google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.31.0
-## explicit; go 1.11
+# google.golang.org/protobuf v1.32.0
+## explicit; go 1.17
+google.golang.org/protobuf/encoding/protodelim
 google.golang.org/protobuf/encoding/protojson
 google.golang.org/protobuf/encoding/prototext
 google.golang.org/protobuf/encoding/protowire