From ec6764197477e3d41919d954d6877b10e268f11f Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin
Date: Thu, 2 Nov 2023 21:18:17 +0100
Subject: [PATCH] vendor: run `make vendor-update`

---
 go.mod | 134 +-
 go.sum | 302 +-
 .../go/compute/internal/version.go | 2 +-
 vendor/cloud.google.com/go/iam/CHANGES.md | 21 +
 .../go/iam/apiv1/iampb/iam_policy.pb.go | 2 +-
 .../go/iam/apiv1/iampb/options.pb.go | 2 +-
 .../go/iam/apiv1/iampb/policy.pb.go | 2 +-
 .../go/internal/.repo-metadata-full.json | 142 +-
 vendor/cloud.google.com/go/storage/CHANGES.md | 33 +
 vendor/cloud.google.com/go/storage/bucket.go | 51 +-
 vendor/cloud.google.com/go/storage/doc.go | 27 +
 .../go/storage/grpc_client.go | 150 +-
 vendor/cloud.google.com/go/storage/hmac.go | 5 +-
 .../go/storage/http_client.go | 168 +-
 .../go/storage/internal/apiv2/doc.go | 12 +-
 .../internal/apiv2/gapic_metadata.json | 10 +
 .../storage/internal/apiv2/storage_client.go | 111 +-
 .../internal/apiv2/storagepb/storage.pb.go | 4501 ++++++-----
 .../go/storage/internal/version.go | 2 +-
 vendor/cloud.google.com/go/storage/invoke.go | 46 +-
 vendor/cloud.google.com/go/storage/reader.go | 10 -
 vendor/cloud.google.com/go/storage/storage.go | 29 +-
 .../azure-sdk-for-go/sdk/azcore/CHANGELOG.md | 36 +
 .../Azure/azure-sdk-for-go/sdk/azcore/core.go | 18 +
 .../sdk/azcore/internal/exported/exported.go | 74 +
 .../sdk/azcore/internal/shared/constants.go | 2 +-
 .../sdk/azcore/internal/shared/shared.go | 29 +-
 .../sdk/azcore/policy/policy.go | 21 +
 .../sdk/azcore/runtime/pager.go | 41 +
 .../sdk/azcore/runtime/policy_bearer_token.go | 4 +
 .../sdk/azcore/runtime/policy_http_header.go | 3 +-
 .../azcore/runtime/policy_include_response.go | 5 +-
 .../azcore/runtime/policy_key_credential.go | 49 +
 .../sdk/azcore/runtime/policy_retry.go | 3 +-
 .../azcore/runtime/policy_sas_credential.go | 39 +
 .../sdk/azcore/runtime/request.go | 14 +
 .../runtime/transport_default_dialer_other.go | 15 +
 .../runtime/transport_default_dialer_wasm.go | 15 +
 .../runtime/transport_default_http_client.go | 4 +-
 .../sdk/azidentity/CHANGELOG.md | 43 +
 .../sdk/azidentity/TROUBLESHOOTING.md | 2 +
 .../sdk/azidentity/azidentity.go | 98 +-
 .../sdk/azidentity/azure_cli_credential.go | 129 +-
 .../azure-sdk-for-go/sdk/azidentity/ci.yml | 25 +-
 .../azidentity/client_assertion_credential.go | 26 +-
 .../client_certificate_credential.go | 30 +-
 .../azidentity/client_secret_credential.go | 28 +-
 .../sdk/azidentity/confidential_client.go | 156 +
 .../azidentity/default_azure_credential.go | 29 +-
 .../sdk/azidentity/device_code_credential.go | 52 +-
 .../azure-sdk-for-go/sdk/azidentity/errors.go | 13 +-
 .../interactive_browser_credential.go | 40 +-
 .../sdk/azidentity/managed_identity_client.go | 28 +-
 .../azidentity/managed_identity_credential.go | 26 +-
 .../sdk/azidentity/on_behalf_of_credential.go | 27 +-
 .../sdk/azidentity/public_client.go | 178 +
 .../azure-sdk-for-go/sdk/azidentity/syncer.go | 130 -
 .../sdk/azidentity/test-resources-pre.ps1 | 36 +
 .../sdk/azidentity/test-resources.bicep | 1 +
 .../username_password_credential.go | 37 +-
 .../sdk/azidentity/version.go | 2 +-
 .../sdk/azidentity/workload_identity.go | 4 +-
 .../sdk/internal/errorinfo/errorinfo.go | 30 +
 .../sdk/storage/azblob/CHANGELOG.md | 25 +-
 .../sdk/storage/azblob/README.md | 2 +-
 .../sdk/storage/azblob/appendblob/client.go | 41 +-
 .../sdk/storage/azblob/assets.json | 2 +-
 .../sdk/storage/azblob/blob/client.go | 207 +-
 .../sdk/storage/azblob/blob/constants.go | 6 +
 .../sdk/storage/azblob/blob/models.go | 10 +-
 .../storage/azblob/bloberror/error_codes.go | 1 +
.../storage/azblob/blockblob/chunkwriting.go | 68 +- .../sdk/storage/azblob/blockblob/client.go | 66 +- .../sdk/storage/azblob/blockblob/constants.go | 13 + .../sdk/storage/azblob/blockblob/models.go | 25 +- .../sdk/storage/azblob/container/client.go | 46 +- .../sdk/storage/azblob/container/models.go | 28 + .../sdk/storage/azblob/container/responses.go | 3 + .../storage/azblob/internal/base/clients.go | 31 +- .../exported/shared_key_credential.go | 2 +- .../azblob/internal/exported/version.go | 2 +- .../internal/generated/appendblob_client.go | 19 +- .../azblob/internal/generated/autorest.md | 40 +- .../azblob/internal/generated/blob_client.go | 17 +- .../internal/generated/block_blob_client.go | 19 +- .../azblob/internal/generated/constants.go | 9 + .../internal/generated/container_client.go | 19 +- .../azblob/internal/generated/models.go | 76 + .../internal/generated/pageblob_client.go | 19 +- .../internal/generated/service_client.go | 19 +- .../generated/zz_appendblob_client.go | 44 +- .../internal/generated/zz_blob_client.go | 192 +- .../internal/generated/zz_blockblob_client.go | 72 +- .../azblob/internal/generated/zz_constants.go | 134 +- .../internal/generated/zz_container_client.go | 203 +- .../azblob/internal/generated/zz_models.go | 48 +- .../internal/generated/zz_models_serde.go | 28 +- .../internal/generated/zz_pageblob_client.go | 74 +- .../internal/generated/zz_response_types.go | 22 + .../internal/generated/zz_service_client.go | 80 +- .../azblob/internal/shared/batch_transfer.go | 16 +- .../azblob/internal/shared/buffer_manager.go | 70 + .../shared}/mmf_unix.go | 10 +- .../shared}/mmf_windows.go | 18 +- .../storage/azblob/internal/shared/shared.go | 9 + .../sdk/storage/azblob/pageblob/client.go | 33 +- .../sdk/storage/azblob/sas/account.go | 29 +- .../sdk/storage/azblob/sas/query_params.go | 13 +- .../sdk/storage/azblob/sas/service.go | 58 +- .../sdk/storage/azblob/service/client.go | 33 +- .../sdk/storage/azblob/service/models.go | 3 + .../github.com/aws/aws-sdk-go-v2/CHANGELOG.md | 1129 +++ vendor/github.com/aws/aws-sdk-go-v2/Makefile | 2 +- vendor/github.com/aws/aws-sdk-go-v2/README.md | 11 +- .../aws/aws-sdk-go-v2/aws/config.go | 4 + .../aws-sdk-go-v2/aws/go_module_metadata.go | 2 +- .../aws/protocol/eventstream/CHANGELOG.md | 8 + .../eventstream/go_module_metadata.go | 2 +- .../aws/retry/retryable_error.go | 16 +- .../aws/aws-sdk-go-v2/ci-find-smithy-go.sh | 10 +- .../aws/aws-sdk-go-v2/config/CHANGELOG.md | 54 + .../aws/aws-sdk-go-v2/config/config.go | 27 +- .../aws/aws-sdk-go-v2/config/env_config.go | 75 +- .../config/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/config/provider.go | 52 + .../aws/aws-sdk-go-v2/config/resolve.go | 27 +- .../aws/aws-sdk-go-v2/config/shared_config.go | 175 +- .../aws-sdk-go-v2/credentials/CHANGELOG.md | 42 + .../credentials/go_module_metadata.go | 2 +- .../feature/ec2/imds/CHANGELOG.md | 21 + .../feature/ec2/imds/api_client.go | 18 + .../feature/ec2/imds/go_module_metadata.go | 2 +- .../ec2/imds/internal/config/resolvers.go | 16 + .../feature/s3/manager/CHANGELOG.md | 54 + .../feature/s3/manager/go_module_metadata.go | 2 +- .../internal/configsources/CHANGELOG.md | 17 + .../internal/configsources/endpoints.go | 57 + .../configsources/go_module_metadata.go | 2 +- .../endpoints/awsrulesfn/partitions.json | 7 + .../internal/endpoints/v2/CHANGELOG.md | 17 + .../endpoints/v2/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/internal/ini/CHANGELOG.md | 23 + .../internal/ini/go_module_metadata.go | 2 +- 
.../aws-sdk-go-v2/internal/ini/ini_lexer.go | 4 + .../internal/ini/literal_tokens.go | 251 +- .../internal/ini/number_helper.go | 152 - .../aws-sdk-go-v2/internal/ini/value_util.go | 161 - .../aws/aws-sdk-go-v2/internal/ini/visitor.go | 25 +- .../aws-sdk-go-v2/internal/v4a/CHANGELOG.md | 17 + .../internal/v4a/go_module_metadata.go | 2 +- .../github.com/aws/aws-sdk-go-v2/modman.toml | 2 +- .../internal/accept-encoding/CHANGELOG.md | 8 + .../accept-encoding/go_module_metadata.go | 2 +- .../service/internal/checksum/CHANGELOG.md | 17 + .../internal/checksum/go_module_metadata.go | 2 +- .../internal/presigned-url/CHANGELOG.md | 17 + .../presigned-url/go_module_metadata.go | 2 +- .../service/internal/s3shared/CHANGELOG.md | 17 + .../internal/s3shared/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/service/s3/CHANGELOG.md | 26 + .../aws-sdk-go-v2/service/s3/api_client.go | 1 + .../service/s3/api_op_AbortMultipartUpload.go | 8 +- .../s3/api_op_CompleteMultipartUpload.go | 8 +- .../service/s3/api_op_CopyObject.go | 37 +- .../service/s3/api_op_CreateBucket.go | 11 +- .../s3/api_op_CreateMultipartUpload.go | 19 +- .../service/s3/api_op_DeleteObject.go | 14 +- .../service/s3/api_op_DeleteObjects.go | 8 +- ...api_op_GetBucketAccelerateConfiguration.go | 8 +- .../service/s3/api_op_GetObject.go | 8 +- .../service/s3/api_op_GetObjectAcl.go | 8 +- .../service/s3/api_op_GetObjectAttributes.go | 8 +- .../service/s3/api_op_GetObjectLegalHold.go | 8 +- .../service/s3/api_op_GetObjectRetention.go | 8 +- .../service/s3/api_op_GetObjectTagging.go | 8 +- .../service/s3/api_op_GetObjectTorrent.go | 8 +- .../service/s3/api_op_HeadObject.go | 8 +- .../service/s3/api_op_ListMultipartUploads.go | 8 +- .../service/s3/api_op_ListObjectVersions.go | 8 +- .../service/s3/api_op_ListParts.go | 8 +- .../service/s3/api_op_PutBucketEncryption.go | 18 +- .../service/s3/api_op_PutBucketReplication.go | 24 +- .../service/s3/api_op_PutBucketTagging.go | 23 +- .../service/s3/api_op_PutBucketWebsite.go | 2 +- .../service/s3/api_op_PutObject.go | 14 +- .../service/s3/api_op_PutObjectAcl.go | 8 +- .../service/s3/api_op_PutObjectLegalHold.go | 8 +- .../s3/api_op_PutObjectLockConfiguration.go | 8 +- .../service/s3/api_op_PutObjectRetention.go | 8 +- .../service/s3/api_op_PutObjectTagging.go | 40 +- .../service/s3/api_op_PutPublicAccessBlock.go | 2 +- .../service/s3/api_op_RestoreObject.go | 8 +- .../service/s3/api_op_UploadPart.go | 8 +- .../service/s3/api_op_UploadPartCopy.go | 8 +- .../s3/api_op_WriteGetObjectResponse.go | 6 +- .../aws/aws-sdk-go-v2/service/s3/endpoints.go | 20 + .../aws-sdk-go-v2/service/s3/generated.json | 1 + .../service/s3/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/service/s3/types/enums.go | 10 +- .../aws-sdk-go-v2/service/s3/types/types.go | 23 +- .../aws-sdk-go-v2/service/sso/CHANGELOG.md | 31 + .../aws-sdk-go-v2/service/sso/api_client.go | 1 + .../aws-sdk-go-v2/service/sso/endpoints.go | 312 +- .../aws-sdk-go-v2/service/sso/generated.json | 1 + .../service/sso/go_module_metadata.go | 2 +- .../sso/internal/endpoints/endpoints.go | 26 + .../service/ssooidc/CHANGELOG.md | 35 + .../service/ssooidc/api_client.go | 1 + .../service/ssooidc/endpoints.go | 312 +- .../service/ssooidc/generated.json | 1 + .../service/ssooidc/go_module_metadata.go | 2 +- .../ssooidc/internal/endpoints/endpoints.go | 34 + .../aws-sdk-go-v2/service/sts/CHANGELOG.md | 27 + .../aws-sdk-go-v2/service/sts/api_client.go | 1 + .../aws-sdk-go-v2/service/sts/endpoints.go | 20 + .../aws-sdk-go-v2/service/sts/generated.json | 1 + 
.../service/sts/go_module_metadata.go | 2 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 1291 +++- .../aws/aws-sdk-go/aws/session/env_config.go | 28 + .../aws/aws-sdk-go/aws/session/session.go | 8 + .../aws-sdk-go/aws/session/shared_config.go | 35 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws-sdk-go/internal/ini/literal_tokens.go | 57 +- .../aws/aws-sdk-go/internal/ini/visitor.go | 6 +- vendor/github.com/aws/smithy-go/CHANGELOG.md | 12 + vendor/github.com/aws/smithy-go/README.md | 15 + .../aws/smithy-go/go_module_metadata.go | 2 +- .../http/middleware_header_comment.go | 81 + .../cpuguy83/go-md2man/v2/md2man/md2man.go | 6 +- .../cpuguy83/go-md2man/v2/md2man/roff.go | 30 +- vendor/github.com/go-logr/logr/README.md | 113 +- vendor/github.com/go-logr/logr/SECURITY.md | 18 + vendor/github.com/go-logr/logr/funcr/funcr.go | 48 +- vendor/github.com/go-logr/logr/logr.go | 35 +- .../github.com/gogo/protobuf/jsonpb/jsonpb.go | 1435 ++++ vendor/github.com/google/go-cmp/LICENSE | 27 - .../github.com/google/go-cmp/cmp/compare.go | 669 -- .../google/go-cmp/cmp/export_panic.go | 16 - .../google/go-cmp/cmp/export_unsafe.go | 36 - .../go-cmp/cmp/internal/diff/debug_disable.go | 18 - .../go-cmp/cmp/internal/diff/debug_enable.go | 123 - .../google/go-cmp/cmp/internal/diff/diff.go | 402 - .../google/go-cmp/cmp/internal/flags/flags.go | 9 - .../go-cmp/cmp/internal/function/func.go | 99 - .../google/go-cmp/cmp/internal/value/name.go | 164 - .../cmp/internal/value/pointer_purego.go | 34 - .../cmp/internal/value/pointer_unsafe.go | 37 - .../google/go-cmp/cmp/internal/value/sort.go | 106 - .../github.com/google/go-cmp/cmp/options.go | 554 -- vendor/github.com/google/go-cmp/cmp/path.go | 380 - vendor/github.com/google/go-cmp/cmp/report.go | 54 - .../google/go-cmp/cmp/report_compare.go | 433 -- .../google/go-cmp/cmp/report_references.go | 264 - .../google/go-cmp/cmp/report_reflect.go | 414 -- .../google/go-cmp/cmp/report_slices.go | 614 -- .../google/go-cmp/cmp/report_text.go | 432 -- .../google/go-cmp/cmp/report_value.go | 121 - vendor/github.com/google/s2a-go/README.md | 7 +- .../internal/handshaker/service/service.go | 53 +- .../s2a-go/internal/record/ticketsender.go | 8 +- .../google/s2a-go/internal/v2/s2av2.go | 28 +- .../github.com/google/s2a-go/retry/retry.go | 4 +- vendor/github.com/google/s2a-go/s2a.go | 27 +- .../github.com/google/s2a-go/s2a_options.go | 7 + .../s2a-go/testdata/mds_client_cert.pem | 19 + .../google/s2a-go/testdata/mds_client_key.pem | 28 + .../google/s2a-go/testdata/mds_root_cert.pem | 21 + .../s2a-go/testdata/mds_server_cert.pem | 21 + .../google/s2a-go/testdata/mds_server_key.pem | 28 + .../s2a-go/testdata/self_signed_cert.pem | 19 + .../s2a-go/testdata/self_signed_key.pem | 28 + vendor/github.com/google/uuid/CHANGELOG.md | 11 + vendor/github.com/google/uuid/CONTRIBUTING.md | 2 +- vendor/github.com/google/uuid/uuid.go | 26 +- .../client/client.go | 42 +- .../client/util/util.go | 9 + .../github.com/json-iterator/go/.codecov.yml | 3 + vendor/github.com/json-iterator/go/.gitignore | 4 + .../github.com/json-iterator/go/.travis.yml | 14 + vendor/github.com/json-iterator/go/Gopkg.lock | 21 + vendor/github.com/json-iterator/go/Gopkg.toml | 26 + vendor/github.com/json-iterator/go/LICENSE | 21 + vendor/github.com/json-iterator/go/README.md | 85 + vendor/github.com/json-iterator/go/adapter.go | 150 + vendor/github.com/json-iterator/go/any.go | 325 + .../github.com/json-iterator/go/any_array.go | 278 + .../github.com/json-iterator/go/any_bool.go | 137 + 
.../github.com/json-iterator/go/any_float.go | 83 + .../github.com/json-iterator/go/any_int32.go | 74 + .../github.com/json-iterator/go/any_int64.go | 74 + .../json-iterator/go/any_invalid.go | 82 + vendor/github.com/json-iterator/go/any_nil.go | 69 + .../github.com/json-iterator/go/any_number.go | 123 + .../github.com/json-iterator/go/any_object.go | 374 + vendor/github.com/json-iterator/go/any_str.go | 166 + .../github.com/json-iterator/go/any_uint32.go | 74 + .../github.com/json-iterator/go/any_uint64.go | 74 + vendor/github.com/json-iterator/go/build.sh | 12 + vendor/github.com/json-iterator/go/config.go | 375 + .../go/fuzzy_mode_convert_table.md | 7 + vendor/github.com/json-iterator/go/iter.go | 349 + .../github.com/json-iterator/go/iter_array.go | 64 + .../github.com/json-iterator/go/iter_float.go | 342 + .../github.com/json-iterator/go/iter_int.go | 346 + .../json-iterator/go/iter_object.go | 267 + .../github.com/json-iterator/go/iter_skip.go | 130 + .../json-iterator/go/iter_skip_sloppy.go | 163 + .../json-iterator/go/iter_skip_strict.go | 99 + .../github.com/json-iterator/go/iter_str.go | 215 + .../github.com/json-iterator/go/jsoniter.go | 18 + vendor/github.com/json-iterator/go/pool.go | 42 + vendor/github.com/json-iterator/go/reflect.go | 337 + .../json-iterator/go/reflect_array.go | 104 + .../json-iterator/go/reflect_dynamic.go | 70 + .../json-iterator/go/reflect_extension.go | 483 ++ .../json-iterator/go/reflect_json_number.go | 112 + .../go/reflect_json_raw_message.go | 76 + .../json-iterator/go/reflect_map.go | 346 + .../json-iterator/go/reflect_marshaler.go | 225 + .../json-iterator/go/reflect_native.go | 453 ++ .../json-iterator/go/reflect_optional.go | 129 + .../json-iterator/go/reflect_slice.go | 99 + .../go/reflect_struct_decoder.go | 1097 +++ .../go/reflect_struct_encoder.go | 211 + vendor/github.com/json-iterator/go/stream.go | 210 + .../json-iterator/go/stream_float.go | 111 + .../github.com/json-iterator/go/stream_int.go | 190 + .../github.com/json-iterator/go/stream_str.go | 372 + vendor/github.com/json-iterator/go/test.sh | 12 + .../klauspost/compress/.goreleaser.yml | 20 +- .../github.com/klauspost/compress/README.md | 19 + .../klauspost/compress/flate/deflate.go | 29 + .../klauspost/compress/flate/fast_encoder.go | 23 - .../klauspost/compress/flate/inflate.go | 66 +- .../klauspost/compress/flate/inflate_gen.go | 34 +- .../klauspost/compress/flate/level5.go | 398 + .../compress/flate/matchlen_amd64.go | 16 + .../klauspost/compress/flate/matchlen_amd64.s | 68 + .../compress/flate/matchlen_generic.go | 33 + .../klauspost/compress/fse/bitwriter.go | 3 +- .../klauspost/compress/fse/compress.go | 3 +- .../klauspost/compress/gzip/gunzip.go | 1 + .../klauspost/compress/gzip/gzip.go | 21 + .../klauspost/compress/huff0/bitwriter.go | 3 +- .../klauspost/compress/huff0/compress.go | 20 +- .../github.com/klauspost/compress/s2/dict.go | 19 + .../klauspost/compress/s2/encode.go | 2 +- .../klauspost/compress/s2/encode_best.go | 3 + .../klauspost/compress/s2/encode_go.go | 2 + .../klauspost/compress/s2/encodeblock_amd64.s | 1610 ++-- .../github.com/klauspost/compress/s2/index.go | 20 +- .../klauspost/compress/zstd/bitreader.go | 34 +- .../klauspost/compress/zstd/bitwriter.go | 3 +- .../klauspost/compress/zstd/blockenc.go | 29 +- .../klauspost/compress/zstd/dict.go | 379 +- .../klauspost/compress/zstd/enc_best.go | 11 +- .../klauspost/compress/zstd/encoder.go | 13 +- .../klauspost/compress/zstd/frameenc.go | 4 +- .../klauspost/compress/zstd/seqdec.go | 17 +- 
.../klauspost/compress/zstd/seqdec_amd64.s | 128 +- .../klauspost/compress/zstd/seqdec_generic.go | 2 +- .../klauspost/compress/zstd/snappy.go | 5 +- .../github.com/mattn/go-isatty/isatty_bsd.go | 3 +- .../mattn/go-isatty/isatty_others.go | 5 +- .../mattn/go-isatty/isatty_tcgets.go | 3 +- .../{ => v2}/LICENSE | 0 .../{ => v2}/NOTICE | 0 .../{ => v2}/pbutil/.gitignore | 0 .../{ => v2}/pbutil/Makefile | 0 .../{ => v2}/pbutil/decode.go | 16 +- .../{ => v2}/pbutil/doc.go | 0 .../{ => v2}/pbutil/encode.go | 5 +- .../modern-go/concurrent/.gitignore | 1 + .../modern-go/concurrent/.travis.yml | 14 + .../github.com/modern-go/concurrent/LICENSE | 201 + .../github.com/modern-go/concurrent/README.md | 49 + .../modern-go/concurrent/executor.go | 14 + .../modern-go/concurrent/go_above_19.go | 15 + .../modern-go/concurrent/go_below_19.go | 33 + vendor/github.com/modern-go/concurrent/log.go | 13 + .../github.com/modern-go/concurrent/test.sh | 12 + .../concurrent/unbounded_executor.go | 119 + .../github.com/modern-go/reflect2/.gitignore | 2 + .../github.com/modern-go/reflect2/.travis.yml | 15 + .../github.com/modern-go/reflect2/Gopkg.lock | 9 + .../github.com/modern-go/reflect2/Gopkg.toml | 31 + vendor/github.com/modern-go/reflect2/LICENSE | 201 + .../github.com/modern-go/reflect2/README.md | 71 + .../modern-go/reflect2/go_above_118.go | 23 + .../modern-go/reflect2/go_above_19.go | 17 + .../modern-go/reflect2/go_below_118.go | 21 + .../github.com/modern-go/reflect2/reflect2.go | 300 + .../modern-go/reflect2/reflect2_amd64.s | 0 .../modern-go/reflect2/reflect2_kind.go | 30 + .../modern-go/reflect2/relfect2_386.s | 0 .../modern-go/reflect2/relfect2_amd64p32.s | 0 .../modern-go/reflect2/relfect2_arm.s | 0 .../modern-go/reflect2/relfect2_arm64.s | 0 .../modern-go/reflect2/relfect2_mips64x.s | 0 .../modern-go/reflect2/relfect2_mipsx.s | 0 .../modern-go/reflect2/relfect2_ppc64x.s | 0 .../modern-go/reflect2/relfect2_s390x.s | 0 .../modern-go/reflect2/safe_field.go | 58 + .../github.com/modern-go/reflect2/safe_map.go | 101 + .../modern-go/reflect2/safe_slice.go | 92 + .../modern-go/reflect2/safe_struct.go | 29 + .../modern-go/reflect2/safe_type.go | 78 + .../github.com/modern-go/reflect2/type_map.go | 70 + .../modern-go/reflect2/unsafe_array.go | 65 + .../modern-go/reflect2/unsafe_eface.go | 59 + .../modern-go/reflect2/unsafe_field.go | 74 + .../modern-go/reflect2/unsafe_iface.go | 64 + .../modern-go/reflect2/unsafe_link.go | 76 + .../modern-go/reflect2/unsafe_map.go | 130 + .../modern-go/reflect2/unsafe_ptr.go | 46 + .../modern-go/reflect2/unsafe_slice.go | 177 + .../modern-go/reflect2/unsafe_struct.go | 59 + .../modern-go/reflect2/unsafe_type.go | 85 + .../client_golang/prometheus/counter.go | 26 +- .../client_golang/prometheus/desc.go | 28 +- .../prometheus/expvar_collector.go | 2 +- .../client_golang/prometheus/gauge.go | 8 +- .../client_golang/prometheus/histogram.go | 118 +- .../prometheus/internal/difflib.go | 2 +- .../client_golang/prometheus/labels.go | 58 +- .../client_golang/prometheus/metric.go | 3 + .../client_golang/prometheus/promauto/auto.go | 6 +- .../client_golang/prometheus/registry.go | 6 +- .../client_golang/prometheus/summary.go | 41 +- .../client_golang/prometheus/value.go | 55 +- .../client_golang/prometheus/vec.go | 106 +- .../prometheus/client_model/go/metrics.pb.go | 354 +- .../prometheus/common/config/http_config.go | 32 +- .../prometheus/common/expfmt/decode.go | 2 +- .../prometheus/common/expfmt/encode.go | 2 +- .../prometheus/procfs/Makefile.common | 2 +- 
.../prometheus/procfs/fs_statfs_notype.go | 4 +- .../prometheus/procfs/fs_statfs_type.go | 4 +- .../prometheus/procfs/mountstats.go | 83 +- .../prometheus/procfs/proc_fdinfo.go | 8 +- .../github.com/prometheus/procfs/proc_maps.go | 20 +- .../prometheus/procfs/proc_status.go | 21 +- .../prometheus/prometheus/config/config.go | 9 + .../model/histogram/float_histogram.go | 429 +- .../prometheus/model/histogram/generic.go | 14 +- .../model/labels/labels_stringlabels.go | 29 +- .../model/textparse/protobufparse.go | 39 +- .../prompb/io/prometheus/client/metrics.pb.go | 3 + .../prompb/io/prometheus/client/metrics.proto | 3 + .../prometheus/prometheus/scrape/manager.go | 13 +- .../prometheus/prometheus/scrape/scrape.go | 18 +- .../prometheus/prometheus/storage/buffer.go | 30 +- .../prometheus/storage/remote/codec.go | 64 +- .../prometheus/normalize_label.go | 41 + .../prometheus/normalize_name.go | 251 + .../prometheusremotewrite/helper.go | 560 ++ .../prometheusremotewrite/histograms.go | 158 + .../prometheusremotewrite/metrics_to_prw.go | 114 + .../number_data_points.go | 104 + .../storage/remote/queue_manager.go | 5 + .../storage/remote/write_handler.go | 61 +- .../prometheus/prometheus/storage/series.go | 237 +- .../prometheus/tsdb/chunkenc/chunk.go | 30 +- .../tsdb/chunkenc/float_histogram.go | 175 +- .../prometheus/tsdb/chunkenc/histogram.go | 200 +- .../tsdb/chunkenc/histogram_meta.go | 11 +- .../prometheus/tsdb/chunkenc/xor.go | 16 +- .../prometheus/tsdb/chunks/chunks.go | 77 +- .../{tsdbutil/chunks.go => chunks/samples.go} | 72 +- .../prometheus/prometheus/tsdb/db.go | 8 +- .../prometheus/prometheus/tsdb/head.go | 174 +- .../prometheus/prometheus/tsdb/head_append.go | 416 +- .../prometheus/prometheus/tsdb/head_read.go | 119 +- .../prometheus/prometheus/tsdb/head_wal.go | 19 +- .../prometheus/prometheus/tsdb/querier.go | 135 +- .../prometheus/tsdb/tsdbutil/histogram.go | 12 +- .../collector/pdata/LICENSE | 202 + .../collector/pdata/internal/.gitignore | 2 + .../collector/pdata/internal/data/bytesid.go | 45 + .../collector/logs/v1/logs_service.pb.go | 844 +++ .../metrics/v1/metrics_service.pb.go | 844 +++ .../collector/trace/v1/trace_service.pb.go | 843 +++ .../data/protogen/common/v1/common.pb.go | 1721 +++++ .../internal/data/protogen/logs/v1/logs.pb.go | 1762 +++++ .../data/protogen/metrics/v1/metrics.pb.go | 6550 +++++++++++++++++ .../data/protogen/resource/v1/resource.pb.go | 381 + .../data/protogen/trace/v1/trace.pb.go | 2884 ++++++++ .../collector/pdata/internal/data/spanid.go | 79 + .../collector/pdata/internal/data/traceid.go | 79 + .../internal/generated_wrapper_byteslice.go | 24 + .../generated_wrapper_float64slice.go | 24 + .../generated_wrapper_instrumentationscope.go | 43 + .../internal/generated_wrapper_resource.go | 41 + .../internal/generated_wrapper_uint64slice.go | 24 + .../pdata/internal/json/attribute.go | 110 + .../collector/pdata/internal/json/enum.go | 29 + .../collector/pdata/internal/json/json.go | 22 + .../collector/pdata/internal/json/number.go | 103 + .../collector/pdata/internal/json/resource.go | 27 + .../collector/pdata/internal/json/scope.go | 31 + .../collector/pdata/internal/otlp/logs.go | 19 + .../collector/pdata/internal/otlp/metrics.go | 19 + .../collector/pdata/internal/otlp/traces.go | 19 + .../collector/pdata/internal/state.go | 22 + .../collector/pdata/internal/wrapper_logs.go | 46 + .../collector/pdata/internal/wrapper_map.go | 38 + .../pdata/internal/wrapper_metrics.go | 46 + .../collector/pdata/internal/wrapper_slice.go | 41 + 
.../pdata/internal/wrapper_traces.go | 46 + .../pdata/internal/wrapper_tracestate.go | 33 + .../collector/pdata/internal/wrapper_value.go | 37 + .../pdata/pcommon/generated_byteslice.go | 108 + .../pdata/pcommon/generated_float64slice.go | 108 + .../pcommon/generated_instrumentationscope.go | 98 + .../pdata/pcommon/generated_resource.go | 74 + .../pdata/pcommon/generated_uint64slice.go | 108 + .../collector/pdata/pcommon/map.go | 280 + .../collector/pdata/pcommon/slice.go | 167 + .../collector/pdata/pcommon/spanid.go | 36 + .../collector/pdata/pcommon/timestamp.go | 27 + .../collector/pdata/pcommon/trace_state.go | 50 + .../collector/pdata/pcommon/traceid.go | 37 + .../collector/pdata/pcommon/value.go | 489 ++ .../pdata/pmetric/aggregation_temporality.go | 34 + .../collector/pdata/pmetric/encoding.go | 30 + .../pdata/pmetric/exemplar_value_type.go | 27 + .../pdata/pmetric/generated_exemplar.go | 143 + .../pdata/pmetric/generated_exemplarslice.go | 137 + .../pmetric/generated_exponentialhistogram.go | 70 + ...generated_exponentialhistogramdatapoint.go | 232 + ...ed_exponentialhistogramdatapointbuckets.go | 70 + ...ated_exponentialhistogramdatapointslice.go | 153 + .../pdata/pmetric/generated_gauge.go | 57 + .../pdata/pmetric/generated_histogram.go | 69 + .../pmetric/generated_histogramdatapoint.go | 205 + .../generated_histogramdatapointslice.go | 153 + .../pdata/pmetric/generated_metric.go | 249 + .../pdata/pmetric/generated_metricslice.go | 153 + .../pmetric/generated_numberdatapoint.go | 145 + .../pmetric/generated_numberdatapointslice.go | 153 + .../pmetric/generated_resourcemetrics.go | 76 + .../pmetric/generated_resourcemetricsslice.go | 153 + .../pdata/pmetric/generated_scopemetrics.go | 76 + .../pmetric/generated_scopemetricsslice.go | 153 + .../collector/pdata/pmetric/generated_sum.go | 81 + .../pdata/pmetric/generated_summary.go | 57 + .../pmetric/generated_summarydatapoint.go | 124 + .../generated_summarydatapointslice.go | 153 + ...nerated_summarydatapointvalueatquantile.go | 75 + ...ed_summarydatapointvalueatquantileslice.go | 153 + .../collector/pdata/pmetric/json.go | 431 ++ .../pdata/pmetric/metric_data_point_flags.go | 28 + .../collector/pdata/pmetric/metric_type.go | 36 + .../collector/pdata/pmetric/metrics.go | 95 + .../pmetric/number_data_point_value_type.go | 27 + .../collector/pdata/pmetric/pb.go | 31 + .../generated_exportpartialsuccess.go | 75 + .../pdata/pmetric/pmetricotlp/grpc.go | 90 + .../pdata/pmetric/pmetricotlp/request.go | 74 + .../pdata/pmetric/pmetricotlp/response.go | 87 + .../collector/semconv/LICENSE | 202 + .../semconv/v1.6.1/generated_resource.go | 991 +++ .../semconv/v1.6.1/generated_trace.go | 1587 ++++ .../collector/semconv/v1.6.1/nonstandard.go | 11 + .../collector/semconv/v1.6.1/schema.go | 9 + .../net/http/otelhttp/handler.go | 2 +- .../otelhttp/internal/semconvutil/httpconv.go | 98 +- .../net/http/otelhttp/version.go | 2 +- vendor/go.opentelemetry.io/otel/.gitignore | 1 + vendor/go.opentelemetry.io/otel/.golangci.yml | 5 - vendor/go.opentelemetry.io/otel/CHANGELOG.md | 72 +- vendor/go.opentelemetry.io/otel/Makefile | 2 +- vendor/go.opentelemetry.io/otel/README.md | 5 - .../go.opentelemetry.io/otel/requirements.txt | 2 +- vendor/go.opentelemetry.io/otel/version.go | 2 +- vendor/go.opentelemetry.io/otel/versions.yaml | 18 +- vendor/go.uber.org/goleak/.golangci.yml | 28 + vendor/go.uber.org/goleak/CHANGELOG.md | 17 +- vendor/go.uber.org/goleak/Makefile | 50 +- vendor/go.uber.org/goleak/README.md | 4 +- vendor/go.uber.org/goleak/glide.yaml | 8 - 
.../go.uber.org/goleak/internal/stack/scan.go | 56 + .../goleak/internal/stack/stacks.go | 249 +- vendor/go.uber.org/goleak/leaks.go | 6 + vendor/go.uber.org/goleak/options.go | 28 +- vendor/go.uber.org/goleak/tracestack_new.go | 10 +- vendor/go.uber.org/multierr/.codecov.yml | 15 + vendor/go.uber.org/multierr/.gitignore | 4 + vendor/go.uber.org/multierr/CHANGELOG.md | 95 + vendor/go.uber.org/multierr/LICENSE.txt | 19 + vendor/go.uber.org/multierr/Makefile | 38 + vendor/go.uber.org/multierr/README.md | 43 + vendor/go.uber.org/multierr/error.go | 646 ++ .../go.uber.org/multierr/error_post_go120.go | 48 + .../go.uber.org/multierr/error_pre_go120.go | 79 + .../x/crypto/chacha20/chacha_arm64.go | 4 +- .../x/crypto/chacha20/chacha_arm64.s | 4 +- .../x/crypto/chacha20/chacha_noasm.go | 4 +- .../golang.org/x/crypto/cryptobyte/builder.go | 5 + .../golang.org/x/crypto/cryptobyte/string.go | 11 + vendor/golang.org/x/net/context/context.go | 56 - vendor/golang.org/x/net/context/go17.go | 73 - vendor/golang.org/x/net/context/go19.go | 21 - vendor/golang.org/x/net/context/pre_go17.go | 301 - vendor/golang.org/x/net/context/pre_go19.go | 110 - vendor/golang.org/x/net/http2/Dockerfile | 51 - vendor/golang.org/x/net/http2/Makefile | 3 - vendor/golang.org/x/net/http2/server.go | 86 +- vendor/golang.org/x/net/http2/transport.go | 3 +- .../clientcredentials/clientcredentials.go | 6 +- vendor/golang.org/x/oauth2/deviceauth.go | 198 + vendor/golang.org/x/oauth2/google/default.go | 31 +- vendor/golang.org/x/oauth2/google/google.go | 46 +- .../google/internal/externalaccount/aws.go | 47 +- .../externalaccount/basecredentials.go | 45 +- .../externalaccount/executablecredsource.go | 4 + .../externalaccount/filecredsource.go | 4 + .../google/internal/externalaccount/header.go | 64 + .../internal/externalaccount/urlcredsource.go | 4 + .../externalaccountauthorizeduser.go | 114 + .../clientauth.go | 8 +- .../sts_exchange.go | 42 +- vendor/golang.org/x/oauth2/internal/token.go | 70 +- vendor/golang.org/x/oauth2/oauth2.go | 33 +- vendor/golang.org/x/oauth2/pkce.go | 68 + vendor/golang.org/x/oauth2/token.go | 2 +- vendor/golang.org/x/sys/cpu/cpu.go | 5 +- vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 2 +- vendor/golang.org/x/sys/cpu/cpu_x86.go | 7 + vendor/golang.org/x/sys/cpu/hwcap_linux.go | 4 +- .../sys/internal/unsafeheader/unsafeheader.go | 30 - vendor/golang.org/x/sys/unix/mkerrors.sh | 1 + vendor/golang.org/x/sys/unix/ptrace_darwin.go | 6 - vendor/golang.org/x/sys/unix/ptrace_ios.go | 6 - vendor/golang.org/x/sys/unix/syscall_aix.go | 2 - .../golang.org/x/sys/unix/syscall_darwin.go | 186 - .../x/sys/unix/syscall_darwin_amd64.go | 1 - .../x/sys/unix/syscall_darwin_arm64.go | 1 - .../x/sys/unix/syscall_dragonfly.go | 198 - .../golang.org/x/sys/unix/syscall_freebsd.go | 192 - vendor/golang.org/x/sys/unix/syscall_linux.go | 136 +- .../golang.org/x/sys/unix/syscall_netbsd.go | 261 - .../golang.org/x/sys/unix/syscall_openbsd.go | 74 - .../golang.org/x/sys/unix/syscall_solaris.go | 18 - vendor/golang.org/x/sys/unix/syscall_unix.go | 3 + .../x/sys/unix/syscall_zos_s390x.go | 1 - vendor/golang.org/x/sys/unix/zerrors_linux.go | 26 + .../x/sys/unix/zerrors_linux_386.go | 2 + .../x/sys/unix/zerrors_linux_amd64.go | 2 + .../x/sys/unix/zerrors_linux_arm.go | 2 + .../x/sys/unix/zerrors_linux_arm64.go | 2 + .../x/sys/unix/zerrors_linux_loong64.go | 4 + .../x/sys/unix/zerrors_linux_mips.go | 2 + .../x/sys/unix/zerrors_linux_mips64.go | 2 + .../x/sys/unix/zerrors_linux_mips64le.go | 2 + .../x/sys/unix/zerrors_linux_mipsle.go | 2 + 
.../x/sys/unix/zerrors_linux_ppc.go | 2 + .../x/sys/unix/zerrors_linux_ppc64.go | 2 + .../x/sys/unix/zerrors_linux_ppc64le.go | 2 + .../x/sys/unix/zerrors_linux_riscv64.go | 2 + .../x/sys/unix/zerrors_linux_s390x.go | 2 + .../x/sys/unix/zerrors_linux_sparc64.go | 2 + .../golang.org/x/sys/unix/zsyscall_aix_ppc.go | 22 - .../x/sys/unix/zsyscall_aix_ppc64.go | 22 - .../x/sys/unix/zsyscall_darwin_amd64.go | 40 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 149 - .../x/sys/unix/zsyscall_darwin_arm64.go | 40 +- .../x/sys/unix/zsyscall_darwin_arm64.s | 149 - .../x/sys/unix/zsyscall_dragonfly_amd64.go | 22 - .../x/sys/unix/zsyscall_freebsd_386.go | 22 - .../x/sys/unix/zsyscall_freebsd_amd64.go | 22 - .../x/sys/unix/zsyscall_freebsd_arm.go | 22 - .../x/sys/unix/zsyscall_freebsd_arm64.go | 22 - .../x/sys/unix/zsyscall_freebsd_riscv64.go | 22 - .../x/sys/unix/zsyscall_illumos_amd64.go | 10 +- .../golang.org/x/sys/unix/zsyscall_linux.go | 42 +- .../x/sys/unix/zsyscall_netbsd_386.go | 22 - .../x/sys/unix/zsyscall_netbsd_amd64.go | 22 - .../x/sys/unix/zsyscall_netbsd_arm.go | 22 - .../x/sys/unix/zsyscall_netbsd_arm64.go | 22 - .../x/sys/unix/zsyscall_openbsd_386.go | 32 +- .../x/sys/unix/zsyscall_openbsd_amd64.go | 22 - .../x/sys/unix/zsyscall_openbsd_arm.go | 32 +- .../x/sys/unix/zsyscall_openbsd_arm64.go | 32 +- .../x/sys/unix/zsyscall_openbsd_mips64.go | 32 +- .../x/sys/unix/zsyscall_openbsd_ppc64.go | 32 +- .../x/sys/unix/zsyscall_openbsd_riscv64.go | 32 +- .../x/sys/unix/zsyscall_solaris_amd64.go | 256 +- .../x/sys/unix/zsyscall_zos_s390x.go | 11 - .../x/sys/unix/zsysnum_linux_386.go | 1 + .../x/sys/unix/zsysnum_linux_amd64.go | 1 + .../x/sys/unix/zsysnum_linux_arm.go | 1 + .../x/sys/unix/zsysnum_linux_arm64.go | 1 + .../x/sys/unix/zsysnum_linux_loong64.go | 1 + .../x/sys/unix/zsysnum_linux_mips.go | 1 + .../x/sys/unix/zsysnum_linux_mips64.go | 1 + .../x/sys/unix/zsysnum_linux_mips64le.go | 1 + .../x/sys/unix/zsysnum_linux_mipsle.go | 1 + .../x/sys/unix/zsysnum_linux_ppc.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 1 + .../x/sys/unix/zsysnum_linux_riscv64.go | 1 + .../x/sys/unix/zsysnum_linux_s390x.go | 1 + .../x/sys/unix/zsysnum_linux_sparc64.go | 1 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 23 +- .../x/sys/unix/ztypes_linux_riscv64.go | 4 + .../golang.org/x/sys/windows/exec_windows.go | 91 +- .../x/sys/windows/security_windows.go | 21 +- .../x/sys/windows/syscall_windows.go | 53 +- .../golang.org/x/sys/windows/types_windows.go | 7 + .../x/sys/windows/zsyscall_windows.go | 54 +- vendor/golang.org/x/text/unicode/norm/trie.go | 2 +- .../iamcredentials/v1/iamcredentials-gen.go | 25 +- .../google.golang.org/api/internal/creds.go | 5 +- vendor/google.golang.org/api/internal/s2a.go | 2 +- .../api/internal/settings.go | 17 + .../google.golang.org/api/internal/version.go | 2 +- .../option/internaloption/internaloption.go | 13 + .../api/storage/v1/storage-api.json | 814 +- .../api/storage/v1/storage-gen.go | 2706 ++++++- .../api/transport/grpc/dial.go | 9 - .../api/transport/grpc/dial_appengine.go | 32 - .../api/transport/http/dial.go | 17 +- .../api/transport/http/dial_appengine.go | 21 - .../google.golang.org/appengine/.travis.yml | 18 - .../appengine/CONTRIBUTING.md | 6 +- vendor/google.golang.org/appengine/README.md | 6 +- .../google.golang.org/appengine/appengine.go | 23 +- .../appengine/appengine_vm.go | 12 +- .../google.golang.org/appengine/identity.go | 3 +- .../appengine/internal/api.go | 357 +- .../appengine/internal/api_classic.go | 29 +- 
.../appengine/internal/api_common.go | 50 +- .../appengine/internal/identity.go | 7 +- .../appengine/internal/identity_classic.go | 23 +- .../appengine/internal/identity_flex.go | 1 + .../appengine/internal/identity_vm.go | 20 +- .../appengine/internal/main.go | 1 + .../appengine/internal/main_vm.go | 3 +- .../internal/socket/socket_service.pb.go | 2822 ------- .../internal/socket/socket_service.proto | 460 -- .../appengine/internal/transaction.go | 10 +- .../google.golang.org/appengine/namespace.go | 3 +- .../google.golang.org/appengine/socket/doc.go | 10 - .../appengine/socket/socket_classic.go | 290 - .../appengine/socket/socket_vm.go | 64 - vendor/google.golang.org/appengine/timeout.go | 2 +- .../appengine/travis_install.sh | 18 - .../appengine/travis_test.sh | 12 - .../appengine/urlfetch/urlfetch.go | 9 +- .../api/annotations/field_info.pb.go | 295 + vendor/google.golang.org/grpc/README.md | 60 +- .../grpc/attributes/attributes.go | 35 +- .../grpc/balancer/balancer.go | 62 +- .../grpc/balancer/base/balancer.go | 22 +- .../grpclb/grpc_lb_v1/load_balancer.pb.go | 2 +- .../grpc/balancer/grpclb/grpclb.go | 81 +- .../grpc/balancer/grpclb/grpclb_picker.go | 9 - .../balancer/grpclb/grpclb_remote_balancer.go | 71 +- .../grpc/balancer/grpclb/grpclb_util.go | 113 +- .../grpc/balancer_conn_wrappers.go | 75 +- .../grpc_binarylog_v1/binarylog.pb.go | 2 +- vendor/google.golang.org/grpc/call.go | 11 +- vendor/google.golang.org/grpc/clientconn.go | 157 +- vendor/google.golang.org/grpc/codec.go | 8 +- .../internal/proto/grpc_gcp/altscontext.pb.go | 2 +- .../internal/proto/grpc_gcp/handshaker.pb.go | 2 +- .../grpc_gcp/transport_security_common.pb.go | 2 +- vendor/google.golang.org/grpc/dialoptions.go | 19 +- .../grpc/encoding/encoding.go | 17 +- .../grpc/encoding/proto/proto.go | 4 +- .../grpc/grpclog/component.go | 40 +- .../google.golang.org/grpc/grpclog/grpclog.go | 30 +- .../google.golang.org/grpc/grpclog/logger.go | 30 +- .../grpc/grpclog/loggerv2.go | 56 +- vendor/google.golang.org/grpc/interceptor.go | 12 +- .../grpc/internal/backoff/backoff.go | 36 + .../balancer/gracefulswitch/gracefulswitch.go | 59 +- .../grpc/internal/balancerload/load.go | 4 +- .../grpc/internal/binarylog/method_logger.go | 4 +- .../grpc/internal/buffer/unbounded.go | 18 +- .../grpc/internal/channelz/funcs.go | 69 +- .../grpc/internal/channelz/logging.go | 12 +- .../grpc/internal/channelz/types.go | 5 + .../grpc/internal/channelz/util_linux.go | 2 +- .../grpc/internal/channelz/util_nonlinux.go | 2 +- .../grpc/internal/credentials/credentials.go | 8 +- .../grpc/internal/envconfig/envconfig.go | 9 +- .../grpc/internal/grpclog/grpclog.go | 40 +- .../grpc/internal/grpclog/prefixLogger.go | 8 +- .../internal/grpcsync/callback_serializer.go | 54 +- .../grpc/internal/grpcsync/pubsub.go | 43 +- .../grpc/{ => internal/idle}/idle.go | 188 +- .../grpc/internal/internal.go | 51 +- .../grpc/internal/metadata/metadata.go | 2 +- .../grpc/internal/pretty/pretty.go | 2 +- .../grpc/internal/resolver/config_selector.go | 4 +- .../grpc/internal/status/status.go | 36 +- .../grpc/internal/transport/controlbuf.go | 16 +- .../grpc/internal/transport/handler_server.go | 13 +- .../grpc/internal/transport/http2_client.go | 56 +- .../grpc/internal/transport/http2_server.go | 31 +- .../grpc/internal/transport/http_util.go | 77 +- .../grpc/internal/transport/transport.go | 19 +- .../google.golang.org/grpc/picker_wrapper.go | 34 +- vendor/google.golang.org/grpc/pickfirst.go | 88 +- vendor/google.golang.org/grpc/preloader.go | 2 +- 
.../grpc/resolver/manual/manual.go | 119 + vendor/google.golang.org/grpc/resolver/map.go | 10 +- .../grpc/resolver/resolver.go | 76 +- .../grpc/resolver_conn_wrapper.go | 10 +- vendor/google.golang.org/grpc/rpc_util.go | 17 +- vendor/google.golang.org/grpc/server.go | 263 +- .../grpc/shared_buffer_pool.go | 4 +- vendor/google.golang.org/grpc/stats/stats.go | 14 +- .../google.golang.org/grpc/status/status.go | 14 +- vendor/google.golang.org/grpc/stream.go | 130 +- vendor/google.golang.org/grpc/tap/tap.go | 6 + vendor/google.golang.org/grpc/trace.go | 6 +- vendor/google.golang.org/grpc/version.go | 2 +- vendor/google.golang.org/grpc/vet.sh | 10 +- vendor/modules.txt | 242 +- 822 files changed, 63511 insertions(+), 20214 deletions(-) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go delete mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/syncer.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{blockblob => internal/shared}/mmf_unix.go (89%) rename vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/{blockblob => internal/shared}/mmf_windows.go (82%) create mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go delete mode 100644 vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go create mode 100644 vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go create mode 100644 vendor/github.com/go-logr/logr/SECURITY.md create mode 100644 vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go delete mode 100644 vendor/github.com/google/go-cmp/LICENSE delete mode 100644 vendor/github.com/google/go-cmp/cmp/compare.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/export_panic.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/export_unsafe.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/function/func.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/name.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/internal/value/sort.go delete mode 100644 
vendor/github.com/google/go-cmp/cmp/options.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/path.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/report.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/report_compare.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/report_references.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/report_reflect.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/report_slices.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/report_text.go delete mode 100644 vendor/github.com/google/go-cmp/cmp/report_value.go create mode 100644 vendor/github.com/google/s2a-go/testdata/mds_client_cert.pem create mode 100644 vendor/github.com/google/s2a-go/testdata/mds_client_key.pem create mode 100644 vendor/github.com/google/s2a-go/testdata/mds_root_cert.pem create mode 100644 vendor/github.com/google/s2a-go/testdata/mds_server_cert.pem create mode 100644 vendor/github.com/google/s2a-go/testdata/mds_server_key.pem create mode 100644 vendor/github.com/google/s2a-go/testdata/self_signed_cert.pem create mode 100644 vendor/github.com/google/s2a-go/testdata/self_signed_key.pem create mode 100644 vendor/github.com/json-iterator/go/.codecov.yml create mode 100644 vendor/github.com/json-iterator/go/.gitignore create mode 100644 vendor/github.com/json-iterator/go/.travis.yml create mode 100644 vendor/github.com/json-iterator/go/Gopkg.lock create mode 100644 vendor/github.com/json-iterator/go/Gopkg.toml create mode 100644 vendor/github.com/json-iterator/go/LICENSE create mode 100644 vendor/github.com/json-iterator/go/README.md create mode 100644 vendor/github.com/json-iterator/go/adapter.go create mode 100644 vendor/github.com/json-iterator/go/any.go create mode 100644 vendor/github.com/json-iterator/go/any_array.go create mode 100644 vendor/github.com/json-iterator/go/any_bool.go create mode 100644 vendor/github.com/json-iterator/go/any_float.go create mode 100644 vendor/github.com/json-iterator/go/any_int32.go create mode 100644 vendor/github.com/json-iterator/go/any_int64.go create mode 100644 vendor/github.com/json-iterator/go/any_invalid.go create mode 100644 vendor/github.com/json-iterator/go/any_nil.go create mode 100644 vendor/github.com/json-iterator/go/any_number.go create mode 100644 vendor/github.com/json-iterator/go/any_object.go create mode 100644 vendor/github.com/json-iterator/go/any_str.go create mode 100644 vendor/github.com/json-iterator/go/any_uint32.go create mode 100644 vendor/github.com/json-iterator/go/any_uint64.go create mode 100644 vendor/github.com/json-iterator/go/build.sh create mode 100644 vendor/github.com/json-iterator/go/config.go create mode 100644 vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md create mode 100644 vendor/github.com/json-iterator/go/iter.go create mode 100644 vendor/github.com/json-iterator/go/iter_array.go create mode 100644 vendor/github.com/json-iterator/go/iter_float.go create mode 100644 vendor/github.com/json-iterator/go/iter_int.go create mode 100644 vendor/github.com/json-iterator/go/iter_object.go create mode 100644 vendor/github.com/json-iterator/go/iter_skip.go create mode 100644 vendor/github.com/json-iterator/go/iter_skip_sloppy.go create mode 100644 vendor/github.com/json-iterator/go/iter_skip_strict.go create mode 100644 vendor/github.com/json-iterator/go/iter_str.go create mode 100644 vendor/github.com/json-iterator/go/jsoniter.go create mode 100644 vendor/github.com/json-iterator/go/pool.go create mode 100644 
vendor/github.com/json-iterator/go/reflect.go create mode 100644 vendor/github.com/json-iterator/go/reflect_array.go create mode 100644 vendor/github.com/json-iterator/go/reflect_dynamic.go create mode 100644 vendor/github.com/json-iterator/go/reflect_extension.go create mode 100644 vendor/github.com/json-iterator/go/reflect_json_number.go create mode 100644 vendor/github.com/json-iterator/go/reflect_json_raw_message.go create mode 100644 vendor/github.com/json-iterator/go/reflect_map.go create mode 100644 vendor/github.com/json-iterator/go/reflect_marshaler.go create mode 100644 vendor/github.com/json-iterator/go/reflect_native.go create mode 100644 vendor/github.com/json-iterator/go/reflect_optional.go create mode 100644 vendor/github.com/json-iterator/go/reflect_slice.go create mode 100644 vendor/github.com/json-iterator/go/reflect_struct_decoder.go create mode 100644 vendor/github.com/json-iterator/go/reflect_struct_encoder.go create mode 100644 vendor/github.com/json-iterator/go/stream.go create mode 100644 vendor/github.com/json-iterator/go/stream_float.go create mode 100644 vendor/github.com/json-iterator/go/stream_int.go create mode 100644 vendor/github.com/json-iterator/go/stream_str.go create mode 100644 vendor/github.com/json-iterator/go/test.sh create mode 100644 vendor/github.com/klauspost/compress/flate/matchlen_amd64.go create mode 100644 vendor/github.com/klauspost/compress/flate/matchlen_amd64.s create mode 100644 vendor/github.com/klauspost/compress/flate/matchlen_generic.go rename vendor/github.com/matttproud/golang_protobuf_extensions/{ => v2}/LICENSE (100%) rename vendor/github.com/matttproud/golang_protobuf_extensions/{ => v2}/NOTICE (100%) rename vendor/github.com/matttproud/golang_protobuf_extensions/{ => v2}/pbutil/.gitignore (100%) rename vendor/github.com/matttproud/golang_protobuf_extensions/{ => v2}/pbutil/Makefile (100%) rename vendor/github.com/matttproud/golang_protobuf_extensions/{ => v2}/pbutil/decode.go (83%) rename vendor/github.com/matttproud/golang_protobuf_extensions/{ => v2}/pbutil/doc.go (100%) rename vendor/github.com/matttproud/golang_protobuf_extensions/{ => v2}/pbutil/encode.go (91%) create mode 100644 vendor/github.com/modern-go/concurrent/.gitignore create mode 100644 vendor/github.com/modern-go/concurrent/.travis.yml create mode 100644 vendor/github.com/modern-go/concurrent/LICENSE create mode 100644 vendor/github.com/modern-go/concurrent/README.md create mode 100644 vendor/github.com/modern-go/concurrent/executor.go create mode 100644 vendor/github.com/modern-go/concurrent/go_above_19.go create mode 100644 vendor/github.com/modern-go/concurrent/go_below_19.go create mode 100644 vendor/github.com/modern-go/concurrent/log.go create mode 100644 vendor/github.com/modern-go/concurrent/test.sh create mode 100644 vendor/github.com/modern-go/concurrent/unbounded_executor.go create mode 100644 vendor/github.com/modern-go/reflect2/.gitignore create mode 100644 vendor/github.com/modern-go/reflect2/.travis.yml create mode 100644 vendor/github.com/modern-go/reflect2/Gopkg.lock create mode 100644 vendor/github.com/modern-go/reflect2/Gopkg.toml create mode 100644 vendor/github.com/modern-go/reflect2/LICENSE create mode 100644 vendor/github.com/modern-go/reflect2/README.md create mode 100644 vendor/github.com/modern-go/reflect2/go_above_118.go create mode 100644 vendor/github.com/modern-go/reflect2/go_above_19.go create mode 100644 vendor/github.com/modern-go/reflect2/go_below_118.go create mode 100644 vendor/github.com/modern-go/reflect2/reflect2.go create 
mode 100644 vendor/github.com/modern-go/reflect2/reflect2_amd64.s create mode 100644 vendor/github.com/modern-go/reflect2/reflect2_kind.go create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_386.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_arm.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_arm64.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_mips64x.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_mipsx.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s create mode 100644 vendor/github.com/modern-go/reflect2/relfect2_s390x.s create mode 100644 vendor/github.com/modern-go/reflect2/safe_field.go create mode 100644 vendor/github.com/modern-go/reflect2/safe_map.go create mode 100644 vendor/github.com/modern-go/reflect2/safe_slice.go create mode 100644 vendor/github.com/modern-go/reflect2/safe_struct.go create mode 100644 vendor/github.com/modern-go/reflect2/safe_type.go create mode 100644 vendor/github.com/modern-go/reflect2/type_map.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_array.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_eface.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_field.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_iface.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_link.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_map.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_ptr.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_slice.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_struct.go create mode 100644 vendor/github.com/modern-go/reflect2/unsafe_type.go create mode 100644 vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_label.go create mode 100644 vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_name.go create mode 100644 vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go create mode 100644 vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go create mode 100644 vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go create mode 100644 vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go rename vendor/github.com/prometheus/prometheus/tsdb/{tsdbutil/chunks.go => chunks/samples.go} (51%) create mode 100644 vendor/go.opentelemetry.io/collector/pdata/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/.gitignore create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/bytesid.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1/logs_service.pb.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1/metrics_service.pb.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1/trace_service.pb.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1/common.pb.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1/logs.pb.go create mode 100644 
vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1/metrics.pb.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1/resource.pb.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1/trace.pb.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/spanid.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/data/traceid.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_byteslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_float64slice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_instrumentationscope.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resource.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_uint64slice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/json/attribute.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/json/enum.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/json/json.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/json/number.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/json/resource.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/json/scope.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/otlp/logs.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/otlp/metrics.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/otlp/traces.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/state.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_logs.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_map.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_metrics.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_slice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_traces.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_tracestate.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_value.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_byteslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_float64slice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_instrumentationscope.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_resource.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_uint64slice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/spanid.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/trace_state.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/traceid.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go create mode 100644 
vendor/go.opentelemetry.io/collector/pdata/pmetric/aggregation_temporality.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/encoding.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/exemplar_value_type.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplar.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplarslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogram.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapoint.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointbuckets.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_gauge.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogram.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapoint.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapointslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metric.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metricslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapoint.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapointslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetrics.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetricsslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetrics.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetricsslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_sum.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summary.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapoint.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantile.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantileslice.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/json.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/metric_data_point_flags.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/metric_type.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/metrics.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/number_data_point_value_type.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/pb.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/generated_exportpartialsuccess.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/grpc.go create mode 100644 vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/request.go create mode 100644 
vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/response.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/LICENSE create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.6.1/generated_resource.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.6.1/generated_trace.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.6.1/nonstandard.go create mode 100644 vendor/go.opentelemetry.io/collector/semconv/v1.6.1/schema.go create mode 100644 vendor/go.uber.org/goleak/.golangci.yml delete mode 100644 vendor/go.uber.org/goleak/glide.yaml create mode 100644 vendor/go.uber.org/goleak/internal/stack/scan.go create mode 100644 vendor/go.uber.org/multierr/.codecov.yml create mode 100644 vendor/go.uber.org/multierr/.gitignore create mode 100644 vendor/go.uber.org/multierr/CHANGELOG.md create mode 100644 vendor/go.uber.org/multierr/LICENSE.txt create mode 100644 vendor/go.uber.org/multierr/Makefile create mode 100644 vendor/go.uber.org/multierr/README.md create mode 100644 vendor/go.uber.org/multierr/error.go create mode 100644 vendor/go.uber.org/multierr/error_post_go120.go create mode 100644 vendor/go.uber.org/multierr/error_pre_go120.go delete mode 100644 vendor/golang.org/x/net/context/context.go delete mode 100644 vendor/golang.org/x/net/context/go17.go delete mode 100644 vendor/golang.org/x/net/context/go19.go delete mode 100644 vendor/golang.org/x/net/context/pre_go17.go delete mode 100644 vendor/golang.org/x/net/context/pre_go19.go delete mode 100644 vendor/golang.org/x/net/http2/Dockerfile delete mode 100644 vendor/golang.org/x/net/http2/Makefile create mode 100644 vendor/golang.org/x/oauth2/deviceauth.go create mode 100644 vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go create mode 100644 vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go rename vendor/golang.org/x/oauth2/google/internal/{externalaccount => stsexchange}/clientauth.go (88%) rename vendor/golang.org/x/oauth2/google/internal/{externalaccount => stsexchange}/sts_exchange.go (68%) create mode 100644 vendor/golang.org/x/oauth2/pkce.go delete mode 100644 vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go delete mode 100644 vendor/google.golang.org/api/transport/grpc/dial_appengine.go delete mode 100644 vendor/google.golang.org/api/transport/http/dial_appengine.go delete mode 100644 vendor/google.golang.org/appengine/.travis.yml delete mode 100644 vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/socket/socket_service.proto delete mode 100644 vendor/google.golang.org/appengine/socket/doc.go delete mode 100644 vendor/google.golang.org/appengine/socket/socket_classic.go delete mode 100644 vendor/google.golang.org/appengine/socket/socket_vm.go delete mode 100644 vendor/google.golang.org/appengine/travis_install.sh delete mode 100644 vendor/google.golang.org/appengine/travis_test.sh create mode 100644 vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go rename vendor/google.golang.org/grpc/{ => internal/idle}/idle.go (61%) create mode 100644 vendor/google.golang.org/grpc/resolver/manual/manual.go diff --git a/go.mod b/go.mod index c5d35e393..d7f4e56f3 100644 --- a/go.mod +++ b/go.mod @@ -3,36 +3,35 @@ module github.com/VictoriaMetrics/VictoriaMetrics go 1.19 replace ( - // Newer versions of this package break vmctl build - golang.org/x/exp => golang.org/x/exp 
v0.0.0-20230713183714-613f0c0eb8a1 - // Newer versions of this package break vmselect build github.com/VictoriaMetrics/metricsql => github.com/VictoriaMetrics/metricsql v0.56.2 + // Newer versions of this package break vmctl build + golang.org/x/exp => golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 ) require ( - cloud.google.com/go/storage v1.32.0 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0 + cloud.google.com/go/storage v1.34.1 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 github.com/VictoriaMetrics/fastcache v1.12.1 // Do not use the original github.com/valyala/fasthttp because of issues // like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b github.com/VictoriaMetrics/fasthttp v1.2.0 github.com/VictoriaMetrics/metrics v1.24.0 - github.com/VictoriaMetrics/metricsql v0.56.2 - github.com/aws/aws-sdk-go-v2 v1.21.0 - github.com/aws/aws-sdk-go-v2/config v1.18.38 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.82 - github.com/aws/aws-sdk-go-v2/service/s3 v1.38.5 + github.com/VictoriaMetrics/metricsql v0.69.0 + github.com/aws/aws-sdk-go-v2 v1.22.1 + github.com/aws/aws-sdk-go-v2/config v1.22.0 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.1 + github.com/aws/aws-sdk-go-v2/service/s3 v1.42.0 github.com/cespare/xxhash/v2 v2.2.0 github.com/cheggaaa/pb/v3 v3.1.4 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 github.com/googleapis/gax-go/v2 v2.12.0 github.com/influxdata/influxdb v1.11.2 - github.com/klauspost/compress v1.16.7 - github.com/prometheus/prometheus v0.46.0 + github.com/klauspost/compress v1.17.2 + github.com/prometheus/prometheus v0.47.2 github.com/urfave/cli/v2 v2.25.7 github.com/valyala/fastjson v1.6.4 github.com/valyala/fastrand v1.1.0 @@ -40,97 +39,102 @@ require ( github.com/valyala/gozstd v1.20.1 github.com/valyala/histogram v1.2.0 github.com/valyala/quicktemplate v1.7.0 - golang.org/x/net v0.14.0 - golang.org/x/oauth2 v0.11.0 - golang.org/x/sys v0.11.0 - google.golang.org/api v0.138.0 + golang.org/x/net v0.17.0 + golang.org/x/oauth2 v0.13.0 + golang.org/x/sys v0.13.0 + google.golang.org/api v0.149.0 gopkg.in/yaml.v2 v2.4.0 ) require ( - cloud.google.com/go v0.110.7 // indirect - cloud.google.com/go/compute v1.23.0 // indirect + cloud.google.com/go v0.110.10 // indirect + cloud.google.com/go/compute v1.23.3 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.2 // indirect - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect + cloud.google.com/go/iam v1.1.5 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 // indirect github.com/VividCortex/ewma v1.2.0 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect - github.com/aws/aws-sdk-go v1.45.1 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.13.36 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini 
v1.3.42 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.14 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.36 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.13.6 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.5 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.21.5 // indirect - github.com/aws/smithy-go v1.14.2 // indirect + github.com/aws/aws-sdk-go v1.47.2 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.0 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.15.1 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.1 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.1 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.5.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.17.0 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.25.0 // indirect + github.com/aws/smithy-go v1.16.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/fatih/color v1.15.0 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.6.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang-jwt/jwt/v5 v5.0.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/google/go-cmp v0.5.9 // indirect - github.com/google/s2a-go v0.1.5 // indirect - github.com/google/uuid v1.3.1 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/google/uuid v1.4.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jpillora/backoff v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.19 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + 
github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.16.0 // indirect - github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/client_golang v1.17.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.45.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect - github.com/prometheus/procfs v0.11.1 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/rivo/uniseg v0.4.4 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/stretchr/testify v1.8.4 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.43.0 // indirect - go.opentelemetry.io/otel v1.17.0 // indirect - go.opentelemetry.io/otel/metric v1.17.0 // indirect - go.opentelemetry.io/otel/trace v1.17.0 // indirect + go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 // indirect + go.opentelemetry.io/collector/semconv v0.88.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect + go.opentelemetry.io/otel v1.19.0 // indirect + go.opentelemetry.io/otel/metric v1.19.0 // indirect + go.opentelemetry.io/otel/trace v1.19.0 // indirect go.uber.org/atomic v1.11.0 // indirect - go.uber.org/goleak v1.2.1 // indirect - golang.org/x/crypto v0.12.0 // indirect - golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect - golang.org/x/sync v0.3.0 // indirect - golang.org/x/text v0.12.0 // indirect + go.uber.org/goleak v1.3.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.14.0 // indirect + golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect + golang.org/x/sync v0.4.0 // indirect + golang.org/x/text v0.13.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect - google.golang.org/grpc v1.57.0 // indirect + golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231030173426-d783a09b4405 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 // indirect + google.golang.org/grpc v1.59.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index f9ae6b983..575b9b35d 100644 --- a/go.sum +++ b/go.sum @@ -13,22 +13,22 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go 
v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= -cloud.google.com/go v0.110.7/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= +cloud.google.com/go v0.110.10/go.mod h1:v1OoFqYxiBkUrruItNM3eT4lLByNjxmJSV/xDKJNnic= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= -cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v1.1.2 h1:gacbrBdWcoVmGLozRuStX45YKvJtzIjJdAolzUs1sm4= -cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= +cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -38,19 +38,18 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.32.0 h1:5w6DxEGOnktmJHarxAOUywxVW9lbNWIzlzzUltG/3+o= -cloud.google.com/go/storage v1.32.0/go.mod h1:Hhh/dogNRGca7IWv1RC2YqEn0c0G77ctA/OxflYkiD8= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +cloud.google.com/go/storage v1.34.1 h1:H2Af2dU5J0PF7A5B+ECFIce+RqxVnrVilO+cu0TS3MI= +cloud.google.com/go/storage v1.34.1/go.mod h1:VN1ElqqvR9adg1k9xlkUJ55cMOP1/QjnNNuT5xQL6dY= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1 h1:/iHxaJhsFr0+xVFfbMr5vxz848jyiWuIEDhYq3y5odY= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= 
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1 h1:LNHhpdK7hzUcx/k1LIcuh5k7k1LGIWLQfCjaneSj7Fc= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1/go.mod h1:uE9zaUfEQT/nbQjVi2IblCG9iaLtZsuYZ8ne+PuQ02M= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 h1:9kDVnTz3vbfweTqAUmk/a/pH5pWFCHtvRpHYC0G/dcA= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 h1:d81/ng9rET2YqdVkVwkb6EXeRrLJIwyGnJcAlAWKwhs= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0 h1:Ma67P/GGprNwsslzEH6+Kb8nybI8jpDTm4Wmzu2ReK8= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0 h1:nVocQV40OQne5613EeLayJiRAJuKlBGy+m22qWG+WRg= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0/go.mod h1:7QJP7dr2wznCMeqIrhMgWGf7XpAQnVrJqDm9nvV3Cu4= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 h1:gggzg0SUMs6SQbEw+3LoSsYf9YMjkupeAnHMX8O9mmY= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= @@ -62,7 +61,6 @@ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUM github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 h1:hVeq+yCyUi+MsoO/CU95yqCIcdzra5ovzk8Q2BBpV2M= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= @@ -86,49 +84,48 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.45.1 h1:PXuxDZIo/Y9Bvtg2t055+dY4hRwNAEcq6bUMv9fXcjk= -github.com/aws/aws-sdk-go v1.45.1/go.mod 
h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/aws/aws-sdk-go-v2 v1.21.0 h1:gMT0IW+03wtYJhRqTVYn0wLzwdnK9sRMcxmtfGzRdJc= -github.com/aws/aws-sdk-go-v2 v1.21.0/go.mod h1:/RfNgGmRxI+iFOB1OeJUyxiU+9s88k3pfHvDagGEp0M= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 h1:OPLEkmhXf6xFPiz0bLeDArZIDx1NNS4oJyG4nv3Gct0= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13/go.mod h1:gpAbvyDGQFozTEmlTFO8XcQKHzubdq0LzRyJpG6MiXM= -github.com/aws/aws-sdk-go-v2/config v1.18.38 h1:CByQCELMgm2tM1lAehx3XNg0R/pfeXsYzqn0Aq2chJQ= -github.com/aws/aws-sdk-go-v2/config v1.18.38/go.mod h1:vNm9Hf5VgG2fSUWhT3zFrqN/RosGcabFMYgiSoxKFU8= -github.com/aws/aws-sdk-go-v2/credentials v1.13.36 h1:ps0cPswZjpsOk6sLwG6fdXTzrYjCplgPEyG3OUbbdqE= -github.com/aws/aws-sdk-go-v2/credentials v1.13.36/go.mod h1:sY2phUzxbygoyDtTXhqi7GjGjCQ1S5a5Rj8u3ksBxCg= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11 h1:uDZJF1hu0EVT/4bogChk8DyjSF6fof6uL/0Y26Ma7Fg= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11/go.mod h1:TEPP4tENqBGO99KwVpV9MlOX4NSrSLP8u3KRy2CDwA8= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.82 h1:gPh2fLhr1kwH2HXFhs1kCblIgHTabqE1N9gwYPhS/fw= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.82/go.mod h1:4pzmxw8ZmkpbvGqrmedWaXuDL2xcABews1VLYqe9Djk= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41 h1:22dGT7PneFMx4+b3pz7lMTRyN8ZKH7M2cW4GP9yUS2g= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41/go.mod h1:CrObHAuPneJBlfEJ5T3szXOUkLEThaGfvnhTf33buas= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35 h1:SijA0mgjV8E+8G45ltVHs0fvKpTj8xmZJ3VwhGKtUSI= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35/go.mod h1:SJC1nEVVva1g3pHAIdCp7QsRIkMmLAgoDquQ9Rr8kYw= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.42 h1:GPUcE/Yq7Ur8YSUk6lVkoIMWnJNO0HT18GUzCWCgCI0= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.42/go.mod h1:rzfdUlfA+jdgLDmPKjd3Chq9V7LVLYo1Nz++Wb91aRo= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4 h1:6lJvvkQ9HmbHZ4h/IEwclwv2mrTW8Uq1SOB/kXy0mfw= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4/go.mod h1:1PrKYwxTM+zjpw9Y41KFtoJCQrJ34Z47Y4VgVbfndjo= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.14 h1:m0QTSI6pZYJTk5WSKx3fm5cNW/DCicVzULBgU/6IyD0= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.14/go.mod h1:dDilntgHy9WnHXsh7dDtUPgHKEfTJIBUTHM8OWm0f/0= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.36 h1:eev2yZX7esGRjqRbnVk1UxMLw4CyVZDpZXRCcy75oQk= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.36/go.mod h1:lGnOkH9NJATw0XEPcAknFBj3zzNTEGRHtSw+CwC1YTg= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35 h1:CdzPW9kKitgIiLV1+MHobfR5Xg25iYnyzWZhyQuSlDI= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35/go.mod h1:QGF2Rs33W5MaN9gYdEQOBBFPLwTZkEhRwI33f7KIG0o= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4 h1:v0jkRigbSD6uOdwcaUQmgEwG1BkPfAPDqaeNt/29ghg= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4/go.mod h1:LhTyt8J04LL+9cIt7pYJ5lbS/U98ZmXovLOR/4LUsk8= -github.com/aws/aws-sdk-go-v2/service/s3 v1.38.5 h1:A42xdtStObqy7NGvzZKpnyNXvoOmm+FENobZ0/ssHWk= -github.com/aws/aws-sdk-go-v2/service/s3 v1.38.5/go.mod h1:rDGMZA7f4pbmTtPOk5v5UM2lmX6UAbRnMDJeDvnH7AM= -github.com/aws/aws-sdk-go-v2/service/sso v1.13.6 h1:2PylFCfKCEDv6PeSN09pC/VUiRd10wi1VfHG5FrW0/g= -github.com/aws/aws-sdk-go-v2/service/sso v1.13.6/go.mod h1:fIAwKQKBFu90pBxx07BFOMJLpRUGu8VOzLJakeY+0K4= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.5 
h1:dnInJb4S0oy8aQuri1mV6ipLlnZPfnsDNB9BGO9PDNY= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.5/go.mod h1:yygr8ACQRY2PrEcy3xsUI357stq2AxnFM6DIsR9lij4= -github.com/aws/aws-sdk-go-v2/service/sts v1.21.5 h1:CQBFElb0LS8RojMJlxRSo/HXipvTZW2S44Lt9Mk2aYQ= -github.com/aws/aws-sdk-go-v2/service/sts v1.21.5/go.mod h1:VC7JDqsqiwXukYEDjoHh9U0fOJtNWh04FPQz4ct4GGU= -github.com/aws/smithy-go v1.14.2 h1:MJU9hqBGbvWZdApzpvoF2WAIJDbtjK2NDJSiJP7HblQ= -github.com/aws/smithy-go v1.14.2/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aws/aws-sdk-go v1.47.2 h1:KEdO2PbjfEBmHvnEwbYEpr65ZIkmwK5aB85Gj19ASuA= +github.com/aws/aws-sdk-go v1.47.2/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go-v2 v1.22.1 h1:sjnni/AuoTXxHitsIdT0FwmqUuNUuHtufcVDErVFT9U= +github.com/aws/aws-sdk-go-v2 v1.22.1/go.mod h1:Kd0OJtkW3Q0M0lUWGszapWjEvrXDzRW+D21JNsroB+c= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.0 h1:hHgLiIrTRtddC0AKcJr5s7i/hLgcpTt+q/FKxf1Zayk= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.0/go.mod h1:w4I/v3NOWgD+qvs1NPEwhd++1h3XPHFaVxasfY6HlYQ= +github.com/aws/aws-sdk-go-v2/config v1.22.0 h1:9Mm99OalzZRz0ab5fpodMoHBApHS6pqRNp3M9NmzvDg= +github.com/aws/aws-sdk-go-v2/config v1.22.0/go.mod h1:2eWgw5lps8fKI7LZVTrRTYP6HE6k/uEFUuTSHfXwqP0= +github.com/aws/aws-sdk-go-v2/credentials v1.15.1 h1:hmf6lAm9hk7uLCfapZn/jL05lm6Uwdbn1B0fgjyuf4M= +github.com/aws/aws-sdk-go-v2/credentials v1.15.1/go.mod h1:QTcHga3ZbQOneJuxmGBOCxiClxmp+TlvmjFexAnJ790= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.2 h1:gIeH4+o1MN/caGBWjoGQTUTIu94xD6fI5B2+TcwBf70= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.2/go.mod h1:wLyMIo/zPOhQhPXTddpfdkSleyigtFi8iMnC+2m/SK4= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.1 h1:ULswbgGNVrW8zEhkCNwrwXrs1mUvy2JTqWaCRsD2ZZw= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.1/go.mod h1:pAXgsDPk1rRwwfkz8/9ISO75vXEHqTGIgbLhGqqQ1GY= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.1 h1:fi1ga6WysOyYb5PAf3Exd6B5GiSNpnZim4h1rhlBqx0= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.1/go.mod h1:V5CY8wNurvPUibTi9mwqUqpiFZ5LnioKWIFUDtIzdI8= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.1 h1:ZpaV/j48RlPc4AmOZuPv22pJliXjXq8/reL63YzyFnw= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.1/go.mod h1:R8aXraabD2e3qv1csxM14/X9WF4wFMIY0kH4YEtYD5M= +github.com/aws/aws-sdk-go-v2/internal/ini v1.5.0 h1:DqOQvIfmGkXZUVJnl9VRk0AnxyS59tCtX9k1Pyss4Ak= +github.com/aws/aws-sdk-go-v2/internal/ini v1.5.0/go.mod h1:VV/Kbw9Mg1GWJOT9WK+oTL3cWZiXtapnNvDSRqTZLsg= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.1 h1:vzYLDkwTw4CY0vUk84MeSufRf8XIsC/GsoIFXD60sTg= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.1/go.mod h1:ToBFBnjeGR2ruMx8IWp/y7vSK3Irj5/oPwifruiqoOM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.0 h1:CJxo7ZBbaIzmXfV3hjcx36n9V87gJsIUPJflwqEHl3Q= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.0/go.mod h1:yjVfjuY4nD1EW9i387Kau+I6V5cBA5YnC/mWNopjZrI= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.1 h1:15FUCJzAP9Y25nioTqTrGlZmhOtthaXBWlt4pS+d3Xo= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.1/go.mod h1:5655NW53Un6l7JzkI6AA3rZvf0m532cSnLThA1fVXcA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.1 h1:2OXw3ppu1XsB6rqKEMV4tnecTjIY3PRV2U6IP6KPJQo= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.1/go.mod h1:FZB4AdakIqW/yERVdGJA6Z9jraax1beXfhBBnK2wwR8= 
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.1 h1:dnl0klXYX9EKpzZbWlH5LJL+YTcEZcJEMPFFr/rAHUQ= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.1/go.mod h1:Mfk/9Joso4tCQYzM4q4HRUIqwln8lnIIMB/OE8Zebdc= +github.com/aws/aws-sdk-go-v2/service/s3 v1.42.0 h1:u0YoSrxjr3Lm+IqIlRAV+4YTFwkXjyB9db9CfUFge2w= +github.com/aws/aws-sdk-go-v2/service/s3 v1.42.0/go.mod h1:98EIdRu+BNsdqITsXfy+57TZfwlUQC9aDn9a9qoo90U= +github.com/aws/aws-sdk-go-v2/service/sso v1.17.0 h1:I/Oh3IxGPfHXiGnwM54TD6hNr/8TlUrBXAtTyGhR+zw= +github.com/aws/aws-sdk-go-v2/service/sso v1.17.0/go.mod h1:H6NCMvDBqA+CvIaXzaSqM6LWtzv9BzZrqBOqz+PzRF8= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.0 h1:irbXQkfVYIRaewYSXcu4yVk0m2T+JzZd0dkop7FjmO0= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.0/go.mod h1:4wPNCkM22+oRe71oydP66K50ojDUC33XutSMi2pEF/M= +github.com/aws/aws-sdk-go-v2/service/sts v1.25.0 h1:sYIFy8tm1xQwRvVQ4CRuBGXKIg9sHNuG6+3UAQuoujk= +github.com/aws/aws-sdk-go-v2/service/sts v1.25.0/go.mod h1:S/LOQUeYDfJeJpFCIJDMjy7dwL4aA33HUdVi+i7uH8k= +github.com/aws/smithy-go v1.16.0 h1:gJZEH/Fqh+RsvlJ1Zt4tVAtV6bKkp3cC+R6FCZMNzik= +github.com/aws/smithy-go v1.16.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -144,14 +141,9 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= -github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -169,8 +161,6 @@ github.com/emicklei/go-restful/v3 v3.10.2 h1:hIovbnmBTLjHXkqEBUz3HGpXZdM7ZrE9fJI github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= @@ -179,10 +169,6 @@ github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBD github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -194,8 +180,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= @@ -259,8 +245,7 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 
h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= @@ -276,13 +261,13 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.5 h1:8IYp3w9nysqv3JH+NJgXJzGbDHzLOTj43BmSkp+O7qg= -github.com/google/s2a-go v0.1.5/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= -github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= -github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= @@ -291,7 +276,6 @@ github.com/gophercloud/gophercloud v1.5.0 h1:cDN6XFCLKiiqvYpjQLq9AiM7RDRbIC9450W github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww= github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.22.0 h1:ydEvDooB/A0c/xpsBd8GSt7P2/zYPBui4KrNip0xGjE= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= @@ -323,6 +307,7 @@ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCV github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= @@ -331,8 +316,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod 
h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= -github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= +github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -349,13 +334,13 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -365,6 +350,7 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= @@ 
-387,39 +373,38 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= -github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= -github.com/prometheus/prometheus v0.46.0 h1:9JSdXnsuT6YsbODEhSQMwxNkGwPExfmzqG73vCMk/Kw= -github.com/prometheus/prometheus v0.46.0/go.mod h1:10L5IJE5CEsjee1FnOcVswYXlPIscDWWt3IJ2UDYrz4= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/prometheus/prometheus v0.47.2 
h1:jWcnuQHz1o1Wu3MZ6nMJDuTI0kU5yJp9pkxh8XEkNvI= +github.com/prometheus/prometheus v0.47.2/go.mod h1:J/bmOSjgH7lFxz2gZhrWEZs2i64vMS+HIuZfmYNhJ/M= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.19 h1:+1H+N9QFl2Sfvia0FBYfMrHYHYhmpZxhSE0wpPL2lYs= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.20 h1:a9hSJdJcd16e0HoMsnFvaHvxB3pxSD+SC7+CISp7xY0= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -432,7 +417,6 @@ github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpE github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -472,19 +456,24 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.43.0 h1:HKORGpiOY0R0nAPtKx/ub8/7XoHhRooP8yNRkuPfelI= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.43.0/go.mod h1:e+y1M74SYXo/FcIx3UATwth2+5dDkM8dBi7eXg1tbw8= -go.opentelemetry.io/otel v1.17.0 h1:MW+phZ6WZ5/uk2nd93ANk/6yJ+dVrvNWUjGhnnFU5jM= -go.opentelemetry.io/otel v1.17.0/go.mod h1:I2vmBGtFaODIVMBSTPVDlJSzBDNf93k60E6Ft0nyjo0= -go.opentelemetry.io/otel/metric v1.17.0 h1:iG6LGVz5Gh+IuO0jmgvpTB6YVrCGngi8QGm+pMd8Pdc= -go.opentelemetry.io/otel/metric v1.17.0/go.mod h1:h4skoxdZI17AxwITdmdZjjYJQH5nzijUUjm+wtPph5o= -go.opentelemetry.io/otel/trace v1.17.0 h1:/SWhSRHmDPOImIAetP1QAeMnZYiQXrTy4fMMYOdSKWQ= -go.opentelemetry.io/otel/trace v1.17.0/go.mod h1:I/4vKTgFclIsXRVucpH25X0mpFSczM7aHeaz0ZBLWjY= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 h1:AgALhc2VenoA5l1DvTdg7mkzaBGqoTSuMkAtjsttBFo= +go.opentelemetry.io/collector/pdata v1.0.0-rcv0017/go.mod h1:Rv9fOclA5AtM/JGm0d4jBOIAo1+jBA13UT5Bx0ovXi4= +go.opentelemetry.io/collector/semconv v0.88.0 
h1:8TVP4hYaUC87S6CCLKNoSxsUE0ChldE4vqotvNHHUnE= +go.opentelemetry.io/collector/semconv v0.88.0/go.mod h1:j/8THcqVxFna1FpvA2zYIsUperEtOaRaqoLYIN4doWw= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -493,23 +482,11 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod 
h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw= golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -520,15 +497,13 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -563,19 +538,18 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= -golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= +golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= +golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -587,13 +561,12 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -601,7 +574,6 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -634,12 +606,12 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= +golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -649,8 +621,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -660,7 +632,6 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= 
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -668,9 +639,7 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -682,7 +651,6 @@ golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -699,13 +667,14 @@ golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 
h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -722,16 +691,16 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.138.0 h1:K/tVp05MxNVbHShRw9m7e9VJGdagNeTdMzqPH7AUqr0= -google.golang.org/api v0.138.0/go.mod h1:4xyob8CxC+0GChNBvEUAk8VBKNvYOTWM9T3v3UfRxuY= +google.golang.org/api v0.149.0 h1:b2CqT6kG+zqJIVKRQ3ELJVLN1PwHZ6DJ3dW8yl82rgY= +google.golang.org/api v0.149.0/go.mod h1:Mwn1B7JTXrzXtnvmzQE2BD6bYZQ8DShKZDZbeN9I7qI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -755,19 +724,18 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto 
v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= -google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= -google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= -google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= +google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 h1:I6WNifs6pF9tNdSob2W24JtyxIYjzFB9qDlpUC76q+U= +google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405/go.mod h1:3WDQMjmJk36UQhjQ89emUzb1mdaHcPeeAh4SCBKznB4= +google.golang.org/genproto/googleapis/api v0.0.0-20231030173426-d783a09b4405 h1:HJMDndgxest5n2y77fnErkM62iUsptE/H8p0dC2Huo4= +google.golang.org/genproto/googleapis/api v0.0.0-20231030173426-d783a09b4405/go.mod h1:oT32Z4o8Zv2xPQTg0pbVaPr0MPOH6f14RgXt7zfIpwg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 h1:AB/lmRny7e2pLhFEYIbl5qkDAUt2h0ZRO4wGPhZf+ik= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405/go.mod h1:67X1fPuzjcrkymZzZV1vvkFeTn2Rvc6lYF9MYFGCcwE= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -780,12 +748,9 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= -google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw= -google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -810,7 +775,6 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 
-gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go index 639553700..540ad16ac 100644 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.23.0" +const Version = "1.23.3" diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md index b8a244178..c4cacb03f 100644 --- a/vendor/cloud.google.com/go/iam/CHANGES.md +++ b/vendor/cloud.google.com/go/iam/CHANGES.md @@ -1,6 +1,27 @@ # Changes +## [1.1.5](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.4...iam/v1.1.5) (2023-11-01) + + +### Bug Fixes + +* **iam:** Bump google.golang.org/api to v0.149.0 ([8d2ab9f](https://github.com/googleapis/google-cloud-go/commit/8d2ab9f320a86c1c0fab90513fc05861561d0880)) + +## [1.1.4](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.3...iam/v1.1.4) (2023-10-26) + + +### Bug Fixes + +* **iam:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7)) + +## [1.1.3](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.2...iam/v1.1.3) (2023-10-12) + + +### Bug Fixes + +* **iam:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) + ## [1.1.2](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.1...iam/v1.1.2) (2023-08-08) diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go index 3d3e76323..85346a891 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.23.2 // source: google/iam/v1/iam_policy.proto diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go index adc445b07..68f8d761f 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.23.2 // source: google/iam/v1/options.proto diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go index de7995434..eefd1d0e5 100644 --- a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go +++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go @@ -14,7 +14,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.23.2 // source: google/iam/v1/policy.proto diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index 95b54b137..31d172047 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -89,6 +89,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/alloydb/connectors/apiv1": { + "api_shortname": "connectors", + "distribution_name": "cloud.google.com/go/alloydb/connectors/apiv1", + "description": "AlloyDB connectors", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/connectors/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/alloydb/connectors/apiv1alpha": { "api_shortname": "connectors", "distribution_name": "cloud.google.com/go/alloydb/connectors/apiv1alpha", @@ -99,6 +109,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/alloydb/connectors/apiv1beta": { + "api_shortname": "connectors", + "distribution_name": "cloud.google.com/go/alloydb/connectors/apiv1beta", + "description": "AlloyDB connectors", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/alloydb/latest/connectors/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/analytics/admin/apiv1alpha": { "api_shortname": "analyticsadmin", "distribution_name": "cloud.google.com/go/analytics/admin/apiv1alpha", @@ -136,7 +156,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apigeeregistry/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/apikeys/apiv2": { @@ -349,6 +369,26 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/bigquery/biglake/apiv1": { + "api_shortname": "biglake", + "distribution_name": "cloud.google.com/go/bigquery/biglake/apiv1", + "description": "BigLake API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/biglake/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/bigquery/biglake/apiv1alpha1": { + "api_shortname": "biglake", + "distribution_name": "cloud.google.com/go/bigquery/biglake/apiv1alpha1", + "description": "BigLake API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/biglake/apiv1alpha1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/bigquery/connection/apiv1": { "api_shortname": "bigqueryconnection", "distribution_name": "cloud.google.com/go/bigquery/connection/apiv1", @@ -659,6 +699,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/config/apiv1": { + "api_shortname": "config", + "distribution_name": "cloud.google.com/go/config/apiv1", + "description": "Infrastructure Manager API", + "language": "go", + "client_library_type": "generated", + 
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/config/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/contactcenterinsights/apiv1": { "api_shortname": "contactcenterinsights", "distribution_name": "cloud.google.com/go/contactcenterinsights/apiv1", @@ -685,7 +735,7 @@ "description": "Container Analysis API", "language": "go", "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/containeranalysis/apiv1beta1", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/containeranalysis/latest/apiv1beta1", "release_level": "preview", "library_type": "GAPIC_AUTO" }, @@ -922,7 +972,7 @@ "cloud.google.com/go/dlp/apiv2": { "api_shortname": "dlp", "distribution_name": "cloud.google.com/go/dlp/apiv2", - "description": "Cloud Data Loss Prevention (DLP) API", + "description": "Cloud Data Loss Prevention (DLP)", "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dlp/latest/apiv2", @@ -1249,6 +1299,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/language/apiv2": { + "api_shortname": "language", + "distribution_name": "cloud.google.com/go/language/apiv2", + "description": "Cloud Natural Language API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/language/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/lifesciences/apiv2beta": { "api_shortname": "lifesciences", "distribution_name": "cloud.google.com/go/lifesciences/apiv2beta", @@ -1309,6 +1369,26 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/maps/fleetengine/apiv1": { + "api_shortname": "fleetengine", + "distribution_name": "cloud.google.com/go/maps/fleetengine/apiv1", + "description": "Local Rides and Deliveries API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/fleetengine/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, + "cloud.google.com/go/maps/fleetengine/delivery/apiv1": { + "api_shortname": "fleetengine", + "distribution_name": "cloud.google.com/go/maps/fleetengine/delivery/apiv1", + "description": "Last Mile Fleet Solution Delivery API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/fleetengine/delivery/apiv1", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha": { "api_shortname": "mapsplatformdatasets", "distribution_name": "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha", @@ -1509,6 +1589,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/notebooks/apiv2": { + "api_shortname": "notebooks", + "distribution_name": "cloud.google.com/go/notebooks/apiv2", + "description": "Notebooks API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/notebooks/latest/apiv2", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, 
"cloud.google.com/go/optimization/apiv1": { "api_shortname": "cloudoptimization", "distribution_name": "cloud.google.com/go/optimization/apiv1", @@ -1626,7 +1716,7 @@ "language": "go", "client_library_type": "generated", "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/policysimulator/latest/apiv1", - "release_level": "preview", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/policytroubleshooter/apiv1": { @@ -1639,6 +1729,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/policytroubleshooter/iam/apiv3": { + "api_shortname": "policytroubleshooter", + "distribution_name": "cloud.google.com/go/policytroubleshooter/iam/apiv3", + "description": "Policy Troubleshooter API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/policytroubleshooter/latest/iam/apiv3", + "release_level": "stable", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/privatecatalog/apiv1beta1": { "api_shortname": "cloudprivatecatalog", "distribution_name": "cloud.google.com/go/privatecatalog/apiv1beta1", @@ -1779,6 +1879,16 @@ "release_level": "preview", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/redis/cluster/apiv1": { + "api_shortname": "redis", + "distribution_name": "cloud.google.com/go/redis/cluster/apiv1", + "description": "Google Cloud Memorystore for Redis API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/redis/latest/cluster/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/resourcemanager/apiv2": { "api_shortname": "cloudresourcemanager", "distribution_name": "cloud.google.com/go/resourcemanager/apiv2", @@ -1889,6 +1999,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/securesourcemanager/apiv1": { + "api_shortname": "securesourcemanager", + "distribution_name": "cloud.google.com/go/securesourcemanager/apiv1", + "description": "Secure Source Manager API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securesourcemanager/latest/apiv1", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/security/privateca/apiv1": { "api_shortname": "privateca", "distribution_name": "cloud.google.com/go/security/privateca/apiv1", @@ -2009,6 +2129,16 @@ "release_level": "stable", "library_type": "GAPIC_AUTO" }, + "cloud.google.com/go/shopping/merchant/inventories/apiv1beta": { + "api_shortname": "merchantapi", + "distribution_name": "cloud.google.com/go/shopping/merchant/inventories/apiv1beta", + "description": "Merchant API", + "language": "go", + "client_library_type": "generated", + "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/inventories/apiv1beta", + "release_level": "preview", + "library_type": "GAPIC_AUTO" + }, "cloud.google.com/go/spanner": { "api_shortname": "spanner", "distribution_name": "cloud.google.com/go/spanner", @@ -2215,8 +2345,8 @@ "description": "Video Stitcher API", "language": "go", "client_library_type": "generated", - "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/latest/video/stitcher/apiv1", - "release_level": "preview", + "client_documentation": 
"https://cloud.google.com/go/docs/reference/cloud.google.com/go/video/latest/stitcher/apiv1", + "release_level": "stable", "library_type": "GAPIC_AUTO" }, "cloud.google.com/go/video/transcoder/apiv1": { diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md index 865717134..f1d51afb7 100644 --- a/vendor/cloud.google.com/go/storage/CHANGES.md +++ b/vendor/cloud.google.com/go/storage/CHANGES.md @@ -1,6 +1,39 @@ # Changes +## [1.34.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.34.0...storage/v1.34.1) (2023-11-01) + + +### Bug Fixes + +* **storage:** Bump google.golang.org/api to v0.149.0 ([8d2ab9f](https://github.com/googleapis/google-cloud-go/commit/8d2ab9f320a86c1c0fab90513fc05861561d0880)) + +## [1.34.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.33.0...storage/v1.34.0) (2023-10-31) + + +### Features + +* **storage/internal:** Add match_glob field to ListObjectsRequest ([#8618](https://github.com/googleapis/google-cloud-go/issues/8618)) ([e9ae601](https://github.com/googleapis/google-cloud-go/commit/e9ae6018983ae09781740e4ff939e6e365863dbb)) +* **storage/internal:** Add terminal_storage_class fields to Autoclass message ([57fc1a6](https://github.com/googleapis/google-cloud-go/commit/57fc1a6de326456eb68ef25f7a305df6636ed386)) +* **storage/internal:** Adds the RestoreObject operation ([56ce871](https://github.com/googleapis/google-cloud-go/commit/56ce87195320634b07ae0b012efcc5f2b3813fb0)) +* **storage:** Support autoclass v2.1 ([#8721](https://github.com/googleapis/google-cloud-go/issues/8721)) ([fe1e195](https://github.com/googleapis/google-cloud-go/commit/fe1e19590a252c6adc6ca6c51a69b6e561e143b8)) +* **storage:** Support MatchGlob for gRPC ([#8670](https://github.com/googleapis/google-cloud-go/issues/8670)) ([3df0287](https://github.com/googleapis/google-cloud-go/commit/3df0287f88d5e2c4526e9e6b8dc2a4ca54f88918)), refs [#7727](https://github.com/googleapis/google-cloud-go/issues/7727) + + +### Bug Fixes + +* **storage:** Drop stream reference after closing it for gRPC writes ([#8872](https://github.com/googleapis/google-cloud-go/issues/8872)) ([525abde](https://github.com/googleapis/google-cloud-go/commit/525abdee433864d4d456f1f1fff5599017b557ff)) +* **storage:** Update golang.org/x/net to v0.17.0 ([174da47](https://github.com/googleapis/google-cloud-go/commit/174da47254fefb12921bbfc65b7829a453af6f5d)) +* **storage:** Update grpc-go to v1.56.3 ([343cea8](https://github.com/googleapis/google-cloud-go/commit/343cea8c43b1e31ae21ad50ad31d3b0b60143f8c)) +* **storage:** Update grpc-go to v1.59.0 ([81a97b0](https://github.com/googleapis/google-cloud-go/commit/81a97b06cb28b25432e4ece595c55a9857e960b7)) + +## [1.33.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.32.0...storage/v1.33.0) (2023-09-07) + + +### Features + +* **storage:** Export gRPC client constructor ([#8509](https://github.com/googleapis/google-cloud-go/issues/8509)) ([1a928ae](https://github.com/googleapis/google-cloud-go/commit/1a928ae205f2325cb5206304af4d609dc3c1447a)) + ## [1.32.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.31.0...storage/v1.32.0) (2023-08-15) diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go index 4ccca5224..3818c4498 100644 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -740,6 +740,13 @@ type Autoclass struct { // If Autoclass is enabled when the bucket is created, 
the ToggleTime // is set to the bucket creation time. This field is read-only. ToggleTime time.Time + // TerminalStorageClass: The storage class that objects in the bucket + // eventually transition to if they are not read for a certain length of + // time. Valid values are NEARLINE and ARCHIVE. + TerminalStorageClass string + // TerminalStorageClassUpdateTime represents the time of the most recent + // update to "TerminalStorageClass". + TerminalStorageClassUpdateTime time.Time } func newBucket(b *raw.Bucket) (*BucketAttrs, error) { @@ -1241,9 +1248,11 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { } if ua.Autoclass != nil { rb.Autoclass = &raw.BucketAutoclass{ - Enabled: ua.Autoclass.Enabled, - ForceSendFields: []string{"Enabled"}, + Enabled: ua.Autoclass.Enabled, + TerminalStorageClass: ua.Autoclass.TerminalStorageClass, + ForceSendFields: []string{"Enabled"}, } + rb.ForceSendFields = append(rb.ForceSendFields, "Autoclass") } if ua.PredefinedACL != "" { // Clear ACL or the call will fail. @@ -1954,9 +1963,10 @@ func (a *Autoclass) toRawAutoclass() *raw.BucketAutoclass { if a == nil { return nil } - // Excluding read only field ToggleTime. + // Excluding read only fields ToggleTime and TerminalStorageClassUpdateTime. return &raw.BucketAutoclass{ - Enabled: a.Enabled, + Enabled: a.Enabled, + TerminalStorageClass: a.TerminalStorageClass, } } @@ -1964,27 +1974,34 @@ func (a *Autoclass) toProtoAutoclass() *storagepb.Bucket_Autoclass { if a == nil { return nil } - // Excluding read only field ToggleTime. - return &storagepb.Bucket_Autoclass{ + // Excluding read only fields ToggleTime and TerminalStorageClassUpdateTime. + ba := &storagepb.Bucket_Autoclass{ Enabled: a.Enabled, } + if a.TerminalStorageClass != "" { + ba.TerminalStorageClass = &a.TerminalStorageClass + } + return ba } func toAutoclassFromRaw(a *raw.BucketAutoclass) *Autoclass { if a == nil || a.ToggleTime == "" { return nil } - // Return Autoclass.ToggleTime only if parsed with a valid value. + ac := &Autoclass{ + Enabled: a.Enabled, + TerminalStorageClass: a.TerminalStorageClass, + } + // Return ToggleTime and TSCUpdateTime only if parsed with valid values. t, err := time.Parse(time.RFC3339, a.ToggleTime) - if err != nil { - return &Autoclass{ - Enabled: a.Enabled, - } + if err == nil { + ac.ToggleTime = t } - return &Autoclass{ - Enabled: a.Enabled, - ToggleTime: t, + ut, err := time.Parse(time.RFC3339, a.TerminalStorageClassUpdateTime) + if err == nil { + ac.TerminalStorageClassUpdateTime = ut } + return ac } func toAutoclassFromProto(a *storagepb.Bucket_Autoclass) *Autoclass { @@ -1992,8 +2009,10 @@ func toAutoclassFromProto(a *storagepb.Bucket_Autoclass) *Autoclass { return nil } return &Autoclass{ - Enabled: a.GetEnabled(), - ToggleTime: a.GetToggleTime().AsTime(), + Enabled: a.GetEnabled(), + ToggleTime: a.GetToggleTime().AsTime(), + TerminalStorageClass: a.GetTerminalStorageClass(), + TerminalStorageClassUpdateTime: a.GetTerminalStorageClassUpdateTime().AsTime(), } } diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go index 7978c2a48..22adb744f 100644 --- a/vendor/cloud.google.com/go/storage/doc.go +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -331,6 +331,33 @@ to add a [custom audit logging] header: // Use client as usual with the context and the additional headers will be sent. client.Bucket("my-bucket").Attrs(ctx) +# Experimental gRPC API + +This package includes support for the Cloud Storage gRPC API, which is currently +in preview. 
This implementation uses gRPC rather than the current JSON & XML +APIs to make requests to Cloud Storage. If you would like to try the API, +please contact your GCP account rep for more information. The gRPC API is not +yet generally available, so it may be subject to breaking changes. + +To create a client which will use gRPC, use the alternate constructor: + + ctx := context.Background() + client, err := storage.NewGRPCClient(ctx) + if err != nil { + // TODO: Handle error. + } + // Use client as usual. + +If the application is running within GCP, users may get better performance by +enabling DirectPath (enabling requests to skip some proxy steps). To enable, +set the environment variable `GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true` and add +the following side-effect imports to your application: + + import ( + _ "google.golang.org/grpc/balancer/rls" + _ "google.golang.org/grpc/xds/googledirectpath" + ) + [Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam [XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object [Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go index daaaf7142..3a5cd4088 100644 --- a/vendor/cloud.google.com/go/storage/grpc_client.go +++ b/vendor/cloud.google.com/go/storage/grpc_client.go @@ -141,11 +141,11 @@ func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project strin Project: toProjectResource(project), } var resp *storagepb.ServiceAccount - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error resp, err = c.raw.GetServiceAccount(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return "", err } @@ -173,13 +173,13 @@ func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket st } var battrs *BucketAttrs - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { res, err := c.raw.CreateBucket(ctx, req, s.gax...) battrs = newBucketFromProto(res) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) return battrs, err } @@ -193,26 +193,26 @@ func (c *grpcStorageClient) ListBuckets(ctx context.Context, project string, opt var gitr *gapic.BucketIterator fetch := func(pageSize int, pageToken string) (token string, err error) { - // Initialize GAPIC-based iterator when pageToken is empty, which - // indicates that this fetch call is attempting to get the first page. - // - // Note: Initializing the GAPIC-based iterator lazily is necessary to - // capture the BucketIterator.Prefix set by the user *after* the - // BucketIterator is returned to them from the veneer. - if pageToken == "" { - req := &storagepb.ListBucketsRequest{ - Parent: toProjectResource(it.projectID), - Prefix: it.Prefix, - } - gitr = c.raw.ListBuckets(it.ctx, req, s.gax...) - } var buckets []*storagepb.Bucket var next string - err = run(it.ctx, func() error { + err = run(it.ctx, func(ctx context.Context) error { + // Initialize GAPIC-based iterator when pageToken is empty, which + // indicates that this fetch call is attempting to get the first page. + // + // Note: Initializing the GAPIC-based iterator lazily is necessary to + // capture the BucketIterator.Prefix set by the user *after* the + // BucketIterator is returned to them from the veneer. 
+ if pageToken == "" { + req := &storagepb.ListBucketsRequest{ + Parent: toProjectResource(it.projectID), + Prefix: it.Prefix, + } + gitr = c.raw.ListBuckets(ctx, req, s.gax...) + } buckets, next, err = gitr.InternalFetch(pageSize, pageToken) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return "", err } @@ -246,9 +246,9 @@ func (c *grpcStorageClient) DeleteBucket(ctx context.Context, bucket string, con ctx = setUserProjectMetadata(ctx, s.userProject) } - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { return c.raw.DeleteBucket(ctx, req, s.gax...) - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) } func (c *grpcStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { @@ -265,13 +265,13 @@ func (c *grpcStorageClient) GetBucket(ctx context.Context, bucket string, conds } var battrs *BucketAttrs - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { res, err := c.raw.GetBucket(ctx, req, s.gax...) battrs = newBucketFromProto(res) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { return nil, ErrBucketNotExist @@ -369,11 +369,11 @@ func (c *grpcStorageClient) UpdateBucket(ctx context.Context, bucket string, uat req.UpdateMask = fieldMask var battrs *BucketAttrs - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { res, err := c.raw.UpdateBucket(ctx, req, s.gax...) battrs = newBucketFromProto(res) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) return battrs, err } @@ -386,10 +386,10 @@ func (c *grpcStorageClient) LockBucketRetentionPolicy(ctx context.Context, bucke return err } - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { _, err := c.raw.LockBucketRetentionPolicy(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) } func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator { @@ -408,23 +408,21 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q LexicographicStart: it.query.StartOffset, LexicographicEnd: it.query.EndOffset, IncludeTrailingDelimiter: it.query.IncludeTrailingDelimiter, + MatchGlob: it.query.MatchGlob, ReadMask: q.toFieldMask(), // a nil Query still results in a "*" FieldMask } if s.userProject != "" { ctx = setUserProjectMetadata(ctx, s.userProject) } - gitr := c.raw.ListObjects(it.ctx, req, s.gax...) fetch := func(pageSize int, pageToken string) (token string, err error) { - // MatchGlob not yet supported for gRPC. - // TODO: add support when b/287306063 resolved. - if q != nil && q.MatchGlob != "" { - return "", status.Errorf(codes.Unimplemented, "MatchGlob is not supported for gRPC") - } var objects []*storagepb.Object - err = run(it.ctx, func() error { + var gitr *gapic.ObjectIterator + err = run(it.ctx, func(ctx context.Context) error { + gitr = c.raw.ListObjects(ctx, req, s.gax...) 
+ it.ctx = ctx objects, token, err = gitr.InternalFetch(pageSize, pageToken) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { if st, ok := status.FromError(err); ok && st.Code() == codes.NotFound { err = ErrBucketNotExist @@ -467,9 +465,9 @@ func (c *grpcStorageClient) DeleteObject(ctx context.Context, bucket, object str if s.userProject != "" { ctx = setUserProjectMetadata(ctx, s.userProject) } - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { return c.raw.DeleteObject(ctx, req, s.gax...) - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { return ErrObjectNotExist } @@ -495,12 +493,12 @@ func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string } var attrs *ObjectAttrs - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { res, err := c.raw.GetObject(ctx, req, s.gax...) attrs = newObjectFromProto(res) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound { return nil, ErrObjectNotExist @@ -577,11 +575,11 @@ func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object str req.UpdateMask = fieldMask var attrs *ObjectAttrs - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { res, err := c.raw.UpdateObject(ctx, req, s.gax...) attrs = newObjectFromProto(res) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if e, ok := status.FromError(err); ok && e.Code() == codes.NotFound { return nil, ErrObjectNotExist } @@ -820,10 +818,10 @@ func (c *grpcStorageClient) ComposeObject(ctx context.Context, req *composeObjec var obj *storagepb.Object var err error - if err := run(ctx, func() error { + if err := run(ctx, func(ctx context.Context) error { obj, err = c.raw.ComposeObject(ctx, rawReq, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)); err != nil { + }, s.retry, s.idempotent); err != nil { return nil, err } @@ -870,9 +868,9 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec var res *storagepb.RewriteResponse var err error - retryCall := func() error { res, err = c.raw.RewriteObject(ctx, call, s.gax...); return err } + retryCall := func(ctx context.Context) error { res, err = c.raw.RewriteObject(ctx, call, s.gax...); return err } - if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)); err != nil { + if err := run(ctx, retryCall, s.retry, s.idempotent); err != nil { return nil, err } @@ -936,7 +934,7 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange var msg *storagepb.ReadObjectResponse var err error - err = run(cc, func() error { + err = run(cc, func(ctx context.Context) error { stream, err = c.raw.ReadObject(cc, req, s.gax...) if err != nil { return err @@ -950,7 +948,7 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange } return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { // Close the stream context we just created to ensure we don't leak // resources. 
@@ -1112,11 +1110,11 @@ func (c *grpcStorageClient) GetIamPolicy(ctx context.Context, resource string, v }, } var rp *iampb.Policy - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error rp, err = c.raw.GetIamPolicy(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) return rp, err } @@ -1130,10 +1128,10 @@ func (c *grpcStorageClient) SetIamPolicy(ctx context.Context, resource string, p Policy: policy, } - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { _, err := c.raw.SetIamPolicy(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) } func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) { @@ -1144,11 +1142,11 @@ func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource str Permissions: permissions, } var res *iampb.TestIamPermissionsResponse - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error res, err = c.raw.TestIamPermissions(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1167,11 +1165,11 @@ func (c *grpcStorageClient) GetHMACKey(ctx context.Context, project, accessID st ctx = setUserProjectMetadata(ctx, s.userProject) } var metadata *storagepb.HmacKeyMetadata - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error metadata, err = c.raw.GetHmacKey(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1193,13 +1191,13 @@ func (c *grpcStorageClient) ListHMACKeys(ctx context.Context, project, serviceAc projectID: project, retry: s.retry, } - gitr := c.raw.ListHmacKeys(it.ctx, req, s.gax...) fetch := func(pageSize int, pageToken string) (token string, err error) { var hmacKeys []*storagepb.HmacKeyMetadata - err = run(it.ctx, func() error { + err = run(it.ctx, func(ctx context.Context) error { + gitr := c.raw.ListHmacKeys(ctx, req, s.gax...) hmacKeys, token, err = gitr.InternalFetch(pageSize, pageToken) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return "", err } @@ -1246,11 +1244,11 @@ func (c *grpcStorageClient) UpdateHMACKey(ctx context.Context, project, serviceA ctx = setUserProjectMetadata(ctx, s.userProject) } var metadata *storagepb.HmacKeyMetadata - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error metadata, err = c.raw.UpdateHmacKey(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1267,11 +1265,11 @@ func (c *grpcStorageClient) CreateHMACKey(ctx context.Context, project, serviceA ctx = setUserProjectMetadata(ctx, s.userProject) } var res *storagepb.CreateHmacKeyResponse - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error res, err = c.raw.CreateHmacKey(ctx, req, s.gax...) 
return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1290,9 +1288,9 @@ func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, project string, a if s.userProject != "" { ctx = setUserProjectMetadata(ctx, s.userProject) } - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { return c.raw.DeleteHmacKey(ctx, req, s.gax...) - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) } // Notification methods. @@ -1309,7 +1307,7 @@ func (c *grpcStorageClient) ListNotifications(ctx context.Context, bucket string Parent: bucketResourceName(globalProjectAlias, bucket), } var notifications []*storagepb.NotificationConfig - err = run(ctx, func() error { + err = run(ctx, func(ctx context.Context) error { gitr := c.raw.ListNotificationConfigs(ctx, req, s.gax...) for { // PageSize is not set and fallbacks to the API default pageSize of 100. @@ -1324,7 +1322,7 @@ func (c *grpcStorageClient) ListNotifications(ctx context.Context, bucket string } req.PageToken = nextPageToken } - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1342,11 +1340,11 @@ func (c *grpcStorageClient) CreateNotification(ctx context.Context, bucket strin NotificationConfig: toProtoNotification(n), } var pbn *storagepb.NotificationConfig - err = run(ctx, func() error { + err = run(ctx, func(ctx context.Context) error { var err error pbn, err = c.raw.CreateNotificationConfig(ctx, req, s.gax...) return err - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1359,9 +1357,9 @@ func (c *grpcStorageClient) DeleteNotification(ctx context.Context, bucket strin s := callSettings(c.settings, opts...) req := &storagepb.DeleteNotificationConfigRequest{Name: id} - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { return c.raw.DeleteNotificationConfig(ctx, req, s.gax...) - }, s.retry, s.idempotent, setRetryHeaderGRPC(ctx)) + }, s.retry, s.idempotent) } // setUserProjectMetadata appends a project ID to the outgoing Context metadata @@ -1560,24 +1558,24 @@ func (w *gRPCWriter) startResumableUpload() error { // the upload, but in the future, we must also support sending it // on the *last* message of the stream. req.ObjectChecksums = toProtoChecksums(w.sendCRC32C, w.attrs) - return run(w.ctx, func() error { + return run(w.ctx, func(ctx context.Context) error { upres, err := w.c.raw.StartResumableWrite(w.ctx, req) w.upid = upres.GetUploadId() return err - }, w.settings.retry, w.settings.idempotent, setRetryHeaderGRPC(w.ctx)) + }, w.settings.retry, w.settings.idempotent) } // queryProgress is a helper that queries the status of the resumable upload // associated with the given upload ID. func (w *gRPCWriter) queryProgress() (int64, error) { var persistedSize int64 - err := run(w.ctx, func() error { + err := run(w.ctx, func(ctx context.Context) error { q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{ UploadId: w.upid, }) persistedSize = q.GetPersistedSize() return err - }, w.settings.retry, true, setRetryHeaderGRPC(w.ctx)) + }, w.settings.retry, true) // q.GetCommittedSize() will return 0 if q is nil. return persistedSize, err @@ -1662,6 +1660,10 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st // status. Closing the stream receives the status as an error. 
_, err = w.stream.CloseAndRecv() + // Drop the stream reference as a new one will need to be created if + // we can retry the upload + w.stream = nil + // Retriable errors mean we should start over and attempt to // resend the entire buffer via a new stream. // If not retriable, falling through will return the error received diff --git a/vendor/cloud.google.com/go/storage/hmac.go b/vendor/cloud.google.com/go/storage/hmac.go index 30b67f437..1b9fbe9dd 100644 --- a/vendor/cloud.google.com/go/storage/hmac.go +++ b/vendor/cloud.google.com/go/storage/hmac.go @@ -289,12 +289,11 @@ func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, call = call.MaxResults(int64(pageSize)) } - ctx := it.ctx var resp *raw.HmacKeysMetadata - err = run(it.ctx, func() error { + err = run(it.ctx, func(ctx context.Context) error { resp, err = call.Context(ctx).Do() return err - }, it.retry, true, setRetryHeaderHTTP(call)) + }, it.retry, true) if err != nil { return "", err } diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go index 7243a4974..b62f009da 100644 --- a/vendor/cloud.google.com/go/storage/http_client.go +++ b/vendor/cloud.google.com/go/storage/http_client.go @@ -45,8 +45,6 @@ import ( // httpStorageClient is the HTTP-JSON API implementation of the transport-agnostic // storageClient interface. -// -// This is an experimental API and not intended for public use. type httpStorageClient struct { creds *google.Credentials hc *http.Client @@ -59,8 +57,6 @@ type httpStorageClient struct { // newHTTPStorageClient initializes a new storageClient that uses the HTTP-JSON // Storage API. -// -// This is an experimental API and not intended for public use. func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageClient, error) { s := initSettings(opts...) o := s.clientOption @@ -152,11 +148,11 @@ func (c *httpStorageClient) GetServiceAccount(ctx context.Context, project strin s := callSettings(c.settings, opts...) 
call := c.raw.Projects.ServiceAccount.Get(project) var res *raw.ServiceAccount - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error res, err = call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) if err != nil { return "", err } @@ -186,14 +182,14 @@ func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket st req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL) } var battrs *BucketAttrs - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { b, err := req.Context(ctx).Do() if err != nil { return err } battrs, err = newBucket(b) return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) return battrs, err } @@ -214,10 +210,10 @@ func (c *httpStorageClient) ListBuckets(ctx context.Context, project string, opt req.MaxResults(int64(pageSize)) } var resp *raw.Buckets - err = run(it.ctx, func() error { - resp, err = req.Context(it.ctx).Do() + err = run(it.ctx, func(ctx context.Context) error { + resp, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) if err != nil { return "", err } @@ -252,7 +248,7 @@ func (c *httpStorageClient) DeleteBucket(ctx context.Context, bucket string, con req.UserProject(s.userProject) } - return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + return run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) } func (c *httpStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) { @@ -268,10 +264,10 @@ func (c *httpStorageClient) GetBucket(ctx context.Context, bucket string, conds } var resp *raw.Bucket - err = run(ctx, func() error { + err = run(ctx, func(ctx context.Context) error { resp, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) var e *googleapi.Error if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { @@ -302,10 +298,10 @@ func (c *httpStorageClient) UpdateBucket(ctx context.Context, bucket string, uat } var rawBucket *raw.Bucket - err = run(ctx, func() error { + err = run(ctx, func(ctx context.Context) error { rawBucket, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -321,10 +317,10 @@ func (c *httpStorageClient) LockBucketRetentionPolicy(ctx context.Context, bucke } req := c.raw.Buckets.LockRetentionPolicy(bucket, metageneration) - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { _, err := req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) } func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Query, opts ...storageOption) *ObjectIterator { s := callSettings(c.settings, opts...) 
@@ -361,10 +357,10 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q } var resp *raw.Objects var err error - err = run(it.ctx, func() error { - resp, err = req.Context(it.ctx).Do() + err = run(it.ctx, func(ctx context.Context) error { + resp, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) if err != nil { var e *googleapi.Error if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { @@ -399,7 +395,7 @@ func (c *httpStorageClient) DeleteObject(ctx context.Context, bucket, object str if s.userProject != "" { req.UserProject(s.userProject) } - err := run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + err := run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) var e *googleapi.Error if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { return ErrObjectNotExist @@ -421,10 +417,10 @@ func (c *httpStorageClient) GetObject(ctx context.Context, bucket, object string } var obj *raw.Object var err error - err = run(ctx, func() error { + err = run(ctx, func(ctx context.Context) error { obj, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) var e *googleapi.Error if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound { return nil, ErrObjectNotExist @@ -503,7 +499,7 @@ func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object str rawObj := attrs.toRawObject(bucket) rawObj.ForceSendFields = forceSendFields rawObj.NullFields = nullFields - call := c.raw.Objects.Patch(bucket, object, rawObj).Projection("full").Context(ctx) + call := c.raw.Objects.Patch(bucket, object, rawObj).Projection("full") if err := applyConds("Update", gen, conds, call); err != nil { return nil, err } @@ -518,7 +514,7 @@ func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object str } var obj *raw.Object var err error - err = run(ctx, func() error { obj, err = call.Do(); return err }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + err = run(ctx, func(ctx context.Context) error { obj, err = call.Context(ctx).Do(); return err }, s.retry, s.idempotent) var e *googleapi.Error if errors.As(err, &e) && e.Code == http.StatusNotFound { return nil, ErrObjectNotExist @@ -535,7 +531,7 @@ func (c *httpStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket s s := callSettings(c.settings, opts...) 
req := c.raw.DefaultObjectAccessControls.Delete(bucket, string(entity)) configureACLCall(ctx, s.userProject, req) - return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + return run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) } func (c *httpStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { @@ -544,10 +540,10 @@ func (c *httpStorageClient) ListDefaultObjectACLs(ctx context.Context, bucket st var err error req := c.raw.DefaultObjectAccessControls.List(bucket) configureACLCall(ctx, s.userProject, req) - err = run(ctx, func() error { - acls, err = req.Do() + err = run(ctx, func(ctx context.Context) error { + acls, err = req.Context(ctx).Do() return err - }, s.retry, true, setRetryHeaderHTTP(req)) + }, s.retry, true) if err != nil { return nil, err } @@ -564,14 +560,13 @@ func (c *httpStorageClient) UpdateDefaultObjectACL(ctx context.Context, bucket s Entity: string(entity), Role: string(role), } - var req setRequest var err error - req = c.raw.DefaultObjectAccessControls.Update(bucket, string(entity), acl) + req := c.raw.DefaultObjectAccessControls.Update(bucket, string(entity), acl) configureACLCall(ctx, s.userProject, req) - return run(ctx, func() error { - _, err = req.Do() + return run(ctx, func(ctx context.Context) error { + _, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) } // Bucket ACL methods. @@ -580,7 +575,7 @@ func (c *httpStorageClient) DeleteBucketACL(ctx context.Context, bucket string, s := callSettings(c.settings, opts...) req := c.raw.BucketAccessControls.Delete(bucket, string(entity)) configureACLCall(ctx, s.userProject, req) - return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + return run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) } func (c *httpStorageClient) ListBucketACLs(ctx context.Context, bucket string, opts ...storageOption) ([]ACLRule, error) { @@ -589,10 +584,10 @@ func (c *httpStorageClient) ListBucketACLs(ctx context.Context, bucket string, o var err error req := c.raw.BucketAccessControls.List(bucket) configureACLCall(ctx, s.userProject, req) - err = run(ctx, func() error { - acls, err = req.Do() + err = run(ctx, func(ctx context.Context) error { + acls, err = req.Context(ctx).Do() return err - }, s.retry, true, setRetryHeaderHTTP(req)) + }, s.retry, true) if err != nil { return nil, err } @@ -609,10 +604,10 @@ func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string, req := c.raw.BucketAccessControls.Update(bucket, string(entity), acl) configureACLCall(ctx, s.userProject, req) var err error - return run(ctx, func() error { - _, err = req.Do() + return run(ctx, func(ctx context.Context) error { + _, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) } // configureACLCall sets the context, user project and headers on the apiary library call. @@ -632,7 +627,7 @@ func (c *httpStorageClient) DeleteObjectACL(ctx context.Context, bucket, object s := callSettings(c.settings, opts...) 
req := c.raw.ObjectAccessControls.Delete(bucket, object, string(entity)) configureACLCall(ctx, s.userProject, req) - return run(ctx, func() error { return req.Context(ctx).Do() }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + return run(ctx, func(ctx context.Context) error { return req.Context(ctx).Do() }, s.retry, s.idempotent) } // ListObjectACLs retrieves object ACL entries. By default, it operates on the latest generation of this object. @@ -643,10 +638,10 @@ func (c *httpStorageClient) ListObjectACLs(ctx context.Context, bucket, object s var err error req := c.raw.ObjectAccessControls.List(bucket, object) configureACLCall(ctx, s.userProject, req) - err = run(ctx, func() error { - acls, err = req.Do() + err = run(ctx, func(ctx context.Context) error { + acls, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -665,14 +660,13 @@ func (c *httpStorageClient) UpdateObjectACL(ctx context.Context, bucket, object Entity: string(entity), Role: string(role), } - var req setRequest var err error - req = c.raw.ObjectAccessControls.Update(bucket, object, string(entity), acl) + req := c.raw.ObjectAccessControls.Update(bucket, object, string(entity), acl) configureACLCall(ctx, s.userProject, req) - return run(ctx, func() error { - _, err = req.Do() + return run(ctx, func(ctx context.Context) error { + _, err = req.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(req)) + }, s.retry, s.idempotent) } // Media operations. @@ -696,7 +690,7 @@ func (c *httpStorageClient) ComposeObject(ctx context.Context, req *composeObjec rawReq.SourceObjects = append(rawReq.SourceObjects, srcObj) } - call := c.raw.Objects.Compose(req.dstBucket, req.dstObject.name, rawReq).Context(ctx) + call := c.raw.Objects.Compose(req.dstBucket, req.dstObject.name, rawReq) if err := applyConds("ComposeFrom destination", defaultGen, req.dstObject.conds, call); err != nil { return nil, err } @@ -713,9 +707,9 @@ func (c *httpStorageClient) ComposeObject(ctx context.Context, req *composeObjec setClientHeader(call.Header()) var err error - retryCall := func() error { obj, err = call.Do(); return err } + retryCall := func(ctx context.Context) error { obj, err = call.Context(ctx).Do(); return err } - if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + if err := run(ctx, retryCall, s.retry, s.idempotent); err != nil { return nil, err } return newObject(obj), nil @@ -725,7 +719,7 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec rawObject := req.dstObject.attrs.toRawObject("") call := c.raw.Objects.Rewrite(req.srcObject.bucket, req.srcObject.name, req.dstObject.bucket, req.dstObject.name, rawObject) - call.Context(ctx).Projection("full") + call.Projection("full") if req.token != "" { call.RewriteToken(req.token) } @@ -761,9 +755,9 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec var err error setClientHeader(call.Header()) - retryCall := func() error { res, err = call.Do(); return err } + retryCall := func(ctx context.Context) error { res, err = call.Context(ctx).Do(); return err } - if err := run(ctx, retryCall, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + if err := run(ctx, retryCall, s.retry, s.idempotent); err != nil { return nil, err } @@ -805,7 +799,6 @@ func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRa if err != nil { return nil, err } - req 
= req.WithContext(ctx) if s.userProject != "" { req.Header.Set("X-Goog-User-Project", s.userProject) @@ -825,7 +818,7 @@ func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRa } reopen := readerReopen(ctx, req.Header, params, s, - func() (*http.Response, error) { return c.hc.Do(req) }, + func(ctx context.Context) (*http.Response, error) { return c.hc.Do(req.WithContext(ctx)) }, func() error { return setConditionsHeaders(req.Header, params.conds) }, func() { req.URL.RawQuery = fmt.Sprintf("generation=%d", params.gen) }) @@ -840,7 +833,6 @@ func (c *httpStorageClient) newRangeReaderJSON(ctx context.Context, params *newR call := c.raw.Objects.Get(params.bucket, params.object) setClientHeader(call.Header()) - call.Context(ctx) call.Projection("full") if s.userProject != "" { @@ -851,7 +843,7 @@ func (c *httpStorageClient) newRangeReaderJSON(ctx context.Context, params *newR return nil, err } - reopen := readerReopen(ctx, call.Header(), params, s, func() (*http.Response, error) { return call.Download() }, + reopen := readerReopen(ctx, call.Header(), params, s, func(ctx context.Context) (*http.Response, error) { return call.Context(ctx).Download() }, func() error { return applyConds("NewReader", params.gen, params.conds, call) }, func() { call.Generation(params.gen) }) @@ -961,11 +953,11 @@ func (c *httpStorageClient) GetIamPolicy(ctx context.Context, resource string, v call.UserProject(s.userProject) } var rp *raw.Policy - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error rp, err = call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -982,10 +974,10 @@ func (c *httpStorageClient) SetIamPolicy(ctx context.Context, resource string, p call.UserProject(s.userProject) } - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { _, err := call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) } func (c *httpStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) { @@ -996,11 +988,11 @@ func (c *httpStorageClient) TestIamPermissions(ctx context.Context, resource str call.UserProject(s.userProject) } var res *raw.TestIamPermissionsResponse - err := run(ctx, func() error { + err := run(ctx, func(ctx context.Context) error { var err error res, err = call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1018,10 +1010,10 @@ func (c *httpStorageClient) GetHMACKey(ctx context.Context, project, accessID st var metadata *raw.HmacKeyMetadata var err error - if err := run(ctx, func() error { + if err := run(ctx, func(ctx context.Context) error { metadata, err = call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + }, s.retry, s.idempotent); err != nil { return nil, err } hk := &raw.HmacKey{ @@ -1058,10 +1050,10 @@ func (c *httpStorageClient) ListHMACKeys(ctx context.Context, project, serviceAc } var resp *raw.HmacKeysMetadata - err = run(it.ctx, func() error { - resp, err = call.Context(it.ctx).Do() + err = run(it.ctx, func(ctx context.Context) error { + resp, err = call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) if err != nil { return "", err } @@ -1103,10 +1095,10 
@@ func (c *httpStorageClient) UpdateHMACKey(ctx context.Context, project, serviceA var metadata *raw.HmacKeyMetadata var err error - if err := run(ctx, func() error { + if err := run(ctx, func(ctx context.Context) error { metadata, err = call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + }, s.retry, s.idempotent); err != nil { return nil, err } hk := &raw.HmacKey{ @@ -1123,11 +1115,11 @@ func (c *httpStorageClient) CreateHMACKey(ctx context.Context, project, serviceA } var hk *raw.HmacKey - if err := run(ctx, func() error { + if err := run(ctx, func(ctx context.Context) error { h, err := call.Context(ctx).Do() hk = h return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)); err != nil { + }, s.retry, s.idempotent); err != nil { return nil, err } return toHMACKeyFromRaw(hk, true) @@ -1139,9 +1131,9 @@ func (c *httpStorageClient) DeleteHMACKey(ctx context.Context, project string, a if s.userProject != "" { call = call.UserProject(s.userProject) } - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { return call.Context(ctx).Do() - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) } // Notification methods. @@ -1160,10 +1152,10 @@ func (c *httpStorageClient) ListNotifications(ctx context.Context, bucket string call.UserProject(s.userProject) } var res *raw.Notifications - err = run(ctx, func() error { + err = run(ctx, func(ctx context.Context) error { res, err = call.Context(ctx).Do() return err - }, s.retry, true, setRetryHeaderHTTP(call)) + }, s.retry, true) if err != nil { return nil, err } @@ -1180,10 +1172,10 @@ func (c *httpStorageClient) CreateNotification(ctx context.Context, bucket strin call.UserProject(s.userProject) } var rn *raw.Notification - err = run(ctx, func() error { + err = run(ctx, func(ctx context.Context) error { rn, err = call.Context(ctx).Do() return err - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) if err != nil { return nil, err } @@ -1199,9 +1191,9 @@ func (c *httpStorageClient) DeleteNotification(ctx context.Context, bucket strin if s.userProject != "" { call.UserProject(s.userProject) } - return run(ctx, func() error { + return run(ctx, func(ctx context.Context) error { return call.Context(ctx).Do() - }, s.retry, s.idempotent, setRetryHeaderHTTP(call)) + }, s.retry, s.idempotent) } type httpReader struct { @@ -1250,7 +1242,7 @@ func setRangeReaderHeaders(h http.Header, params *newRangeReaderParams) error { // readerReopen initiates a Read with offset and length, assuming we // have already read seen bytes. func readerReopen(ctx context.Context, header http.Header, params *newRangeReaderParams, s *settings, - doDownload func() (*http.Response, error), applyConditions func() error, setGeneration func()) func(int64) (*http.Response, error) { + doDownload func(context.Context) (*http.Response, error), applyConditions func() error, setGeneration func()) func(int64) (*http.Response, error) { return func(seen int64) (*http.Response, error) { // If the context has already expired, return immediately without making a // call. 
@@ -1277,8 +1269,8 @@ func readerReopen(ctx context.Context, header http.Header, params *newRangeReade var err error var res *http.Response - err = run(ctx, func() error { - res, err = doDownload() + err = run(ctx, func(ctx context.Context) error { + res, err = doDownload(ctx) if err != nil { var e *googleapi.Error if errors.As(err, &e) { @@ -1332,7 +1324,7 @@ func readerReopen(ctx context.Context, header http.Header, params *newRangeReade params.gen = gen64 } return nil - }, s.retry, s.idempotent, setRetryHeaderHTTP(nil)) + }, s.retry, s.idempotent) if err != nil { return nil, err } diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go index 40e7ae180..c50b0fb0e 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go @@ -17,7 +17,17 @@ // Package storage is an auto-generated package for the // Cloud Storage API. // -// Lets you store and retrieve potentially-large, immutable data objects. +// Stop. This folder is likely not what you are looking for. This folder +// contains protocol buffer definitions for an unreleased API for accessing +// Cloud Storage. Unless told otherwise by a Google Cloud representative, do +// not use any of the contents of this folder. If you would like to use Cloud +// Storage, please consult our official documentation (at +// https://cloud.google.com/storage/docs/apis) for details on our XML and +// JSON APIs, or else consider one of our client libraries (at +// https://cloud.google.com/storage/docs/reference/libraries). This API +// defined in this folder is unreleased and may shut off, break, or fail at +// any time for any users who are not registered as a part of a private +// preview program. 
// // # General documentation // diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json index 8bbb154c0..56256bb2c 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json @@ -10,6 +10,11 @@ "grpc": { "libraryClient": "Client", "rpcs": { + "BidiWriteObject": { + "methods": [ + "BidiWriteObject" + ] + }, "CancelResumableWrite": { "methods": [ "CancelResumableWrite" @@ -120,6 +125,11 @@ "ReadObject" ] }, + "RestoreObject": { + "methods": [ + "RestoreObject" + ] + }, "RewriteObject": { "methods": [ "RewriteObject" diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go index 8f2f66851..0de7e3fb4 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go @@ -56,11 +56,13 @@ type CallOptions struct { ListNotificationConfigs []gax.CallOption ComposeObject []gax.CallOption DeleteObject []gax.CallOption + RestoreObject []gax.CallOption CancelResumableWrite []gax.CallOption GetObject []gax.CallOption ReadObject []gax.CallOption UpdateObject []gax.CallOption WriteObject []gax.CallOption + BidiWriteObject []gax.CallOption ListObjects []gax.CallOption RewriteObject []gax.CallOption StartResumableWrite []gax.CallOption @@ -282,6 +284,19 @@ func defaultCallOptions() *CallOptions { }) }), }, + RestoreObject: []gax.CallOption{ + gax.WithTimeout(60000 * time.Millisecond), + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, CancelResumableWrite: []gax.CallOption{ gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { @@ -345,6 +360,18 @@ func defaultCallOptions() *CallOptions { }) }), }, + BidiWriteObject: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, ListObjects: []gax.CallOption{ gax.WithTimeout(60000 * time.Millisecond), gax.WithRetry(func() gax.Retryer { @@ -498,11 +525,13 @@ type internalClient interface { ListNotificationConfigs(context.Context, *storagepb.ListNotificationConfigsRequest, ...gax.CallOption) *NotificationConfigIterator ComposeObject(context.Context, *storagepb.ComposeObjectRequest, ...gax.CallOption) (*storagepb.Object, error) DeleteObject(context.Context, *storagepb.DeleteObjectRequest, ...gax.CallOption) error + RestoreObject(context.Context, *storagepb.RestoreObjectRequest, ...gax.CallOption) (*storagepb.Object, error) CancelResumableWrite(context.Context, *storagepb.CancelResumableWriteRequest, ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) GetObject(context.Context, *storagepb.GetObjectRequest, ...gax.CallOption) (*storagepb.Object, error) ReadObject(context.Context, *storagepb.ReadObjectRequest, ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) UpdateObject(context.Context, *storagepb.UpdateObjectRequest, ...gax.CallOption) (*storagepb.Object, error) WriteObject(context.Context, ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) + 
BidiWriteObject(context.Context, ...gax.CallOption) (storagepb.Storage_BidiWriteObjectClient, error) ListObjects(context.Context, *storagepb.ListObjectsRequest, ...gax.CallOption) *ObjectIterator RewriteObject(context.Context, *storagepb.RewriteObjectRequest, ...gax.CallOption) (*storagepb.RewriteResponse, error) StartResumableWrite(context.Context, *storagepb.StartResumableWriteRequest, ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) @@ -598,16 +627,16 @@ func (c *Client) LockBucketRetentionPolicy(ctx context.Context, req *storagepb.L // GetIamPolicy gets the IAM policy for a specified bucket or object. // The resource field in the request should be -// projects//buckets/ for a bucket or -// projects//buckets//objects/ for an object. +// projects/_/buckets/{bucket} for a bucket or +// projects/_/buckets/{bucket}/objects/{object} for an object. func (c *Client) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { return c.internalClient.GetIamPolicy(ctx, req, opts...) } // SetIamPolicy updates an IAM policy for the specified bucket or object. // The resource field in the request should be -// projects//buckets/ for a bucket or -// projects//buckets//objects/ for an object. +// projects/_/buckets/{bucket} for a bucket or +// projects/_/buckets/{bucket}/objects/{object} for an object. func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest, opts ...gax.CallOption) (*iampb.Policy, error) { return c.internalClient.SetIamPolicy(ctx, req, opts...) } @@ -615,8 +644,8 @@ func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyReques // TestIamPermissions tests a set of permissions on the given bucket or object to see which, if // any, are held by the caller. // The resource field in the request should be -// projects//buckets/ for a bucket or -// projects//buckets//objects/ for an object. +// projects/_/buckets/{bucket} for a bucket or +// projects/_/buckets/{bucket}/objects/{object} for an object. func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) { return c.internalClient.TestIamPermissions(ctx, req, opts...) } @@ -665,6 +694,11 @@ func (c *Client) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRe return c.internalClient.DeleteObject(ctx, req, opts...) } +// RestoreObject restores a soft-deleted object. +func (c *Client) RestoreObject(ctx context.Context, req *storagepb.RestoreObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { + return c.internalClient.RestoreObject(ctx, req, opts...) +} + // CancelResumableWrite cancels an in-progress resumable upload. // // Any attempts to write to the resumable upload after cancelling the upload @@ -752,10 +786,33 @@ func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRe // Attempting to resume an already finalized object will result in an OK // status, with a WriteObjectResponse containing the finalized object’s // metadata. +// +// Alternatively, the BidiWriteObject operation may be used to write an +// object with controls over flushing and the ability to fetch the ability to +// determine the current persisted size. func (c *Client) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) { return c.internalClient.WriteObject(ctx, opts...) } +// BidiWriteObject stores a new object and metadata. 
+// +// This is similar to the WriteObject call with the added support for +// manual flushing of persisted state, and the ability to determine current +// persisted size without closing the stream. +// +// The client may specify one or both of the state_lookup and flush fields +// in each BidiWriteObjectRequest. If flush is specified, the data written +// so far will be persisted to storage. If state_lookup is specified, the +// service will respond with a BidiWriteObjectResponse that contains the +// persisted size. If both flush and state_lookup are specified, the flush +// will always occur before a state_lookup, so that both may be set in the +// same request and the returned state will be the state of the object +// post-flush. When the stream is closed, a BidiWriteObjectResponse will +// always be sent to the client, regardless of the value of state_lookup. +func (c *Client) BidiWriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_BidiWriteObjectClient, error) { + return c.internalClient.BidiWriteObject(ctx, opts...) +} + // ListObjects retrieves a list of objects matching the criteria. func (c *Client) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator { return c.internalClient.ListObjects(ctx, req, opts...) @@ -1375,6 +1432,33 @@ func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObje return err } +func (c *gRPCClient) RestoreObject(ctx context.Context, req *storagepb.RestoreObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) { + routingHeaders := "" + routingHeadersMap := make(map[string]string) + if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 { + routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1]) + } + for headerName, headerValue := range routingHeadersMap { + routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue) + } + routingHeaders = strings.TrimSuffix(routingHeaders, "&") + hds := []string{"x-goog-request-params", routingHeaders} + + hds = append(c.xGoogHeaders, hds...) + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...) + opts = append((*c.CallOptions).RestoreObject[0:len((*c.CallOptions).RestoreObject):len((*c.CallOptions).RestoreObject)], opts...) + var resp *storagepb.Object + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.RestoreObject(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + func (c *gRPCClient) CancelResumableWrite(ctx context.Context, req *storagepb.CancelResumableWriteRequest, opts ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error) { routingHeaders := "" routingHeadersMap := make(map[string]string) @@ -1498,6 +1582,21 @@ func (c *gRPCClient) WriteObject(ctx context.Context, opts ...gax.CallOption) (s return resp, nil } +func (c *gRPCClient) BidiWriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_BidiWriteObjectClient, error) { + ctx = gax.InsertMetadataIntoOutgoingContext(ctx, c.xGoogHeaders...) + var resp storagepb.Storage_BidiWriteObjectClient + opts = append((*c.CallOptions).BidiWriteObject[0:len((*c.CallOptions).BidiWriteObject):len((*c.CallOptions).BidiWriteObject)], opts...) 
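The BidiWriteObject doc comment above describes the flush/state_lookup handshake: flush persists the data written so far, state_lookup asks the service to report the persisted size, and when both are set the flush happens first. A rough usage sketch against the generated storagepb types is shown below; it assumes an already-open stream with the standard gRPC Send/Recv methods, and the storagepb import path is the vendored internal package, so it is illustrative only and not importable outside this module.

package storagesketch

import (
	storagepb "cloud.google.com/go/storage/internal/apiv2/storagepb"
)

// flushAndQuery sends one BidiWriteObjectRequest that both flushes the bytes
// written so far and asks for the current persisted size. Because the flush
// is applied before the state lookup, the returned size reflects the
// post-flush state of the object.
func flushAndQuery(stream storagepb.Storage_BidiWriteObjectClient, writeOffset int64) (int64, error) {
	if err := stream.Send(&storagepb.BidiWriteObjectRequest{
		WriteOffset: writeOffset,
		Flush:       true,
		StateLookup: true,
	}); err != nil {
		return 0, err
	}
	resp, err := stream.Recv()
	if err != nil {
		return 0, err
	}
	return resp.GetPersistedSize(), nil
}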
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.BidiWriteObject(ctx, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + func (c *gRPCClient) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator { routingHeaders := "" routingHeadersMap := make(map[string]string) diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go index 2bfd73dea..3486fd153 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go @@ -177,7 +177,7 @@ func (x ServiceConstants_Values) Number() protoreflect.EnumNumber { // Deprecated: Use ServiceConstants_Values.Descriptor instead. func (ServiceConstants_Values) EnumDescriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42, 0} } // Request message for DeleteBucket. @@ -338,7 +338,7 @@ type CreateBucketRequest struct { Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // Properties of the new bucket being inserted. // The name of the bucket is specified in the `bucket_id` field. Populating - // `bucket.name` field will be ignored. + // `bucket.name` field will result in an error. // The project of the bucket must be specified in the `bucket.project` field. // This field must be in `projects/{projectIdentifier}` format, // {projectIdentifier} can be the project ID or project number. The `parent` @@ -1277,6 +1277,137 @@ func (x *DeleteObjectRequest) GetCommonObjectRequestParams() *CommonObjectReques return nil } +// Message for restoring an object. +// `bucket`, `object`, and `generation` **must** be set. +type RestoreObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. Name of the bucket in which the object resides. + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + // Required. The name of the object to restore. + Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` + // Required. The specific revision of the object to restore. + Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` + // Makes the operation conditional on whether the object's current generation + // matches the given value. Setting to 0 makes the operation succeed only if + // there are no live versions of the object. + IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"` + // Makes the operation conditional on whether the object's live generation + // does not match the given value. If no live object exists, the precondition + // fails. Setting to 0 makes the operation succeed only if there is a live + // version of the object. + IfGenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration matches the given value. 
+ IfMetagenerationMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"` + // Makes the operation conditional on whether the object's current + // metageneration does not match the given value. + IfMetagenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"` + // If false or unset, the bucket's default object ACL will be used. + // If true, copy the source object's access controls. + // Return an error if bucket has UBLA enabled. + CopySourceAcl *bool `protobuf:"varint,9,opt,name=copy_source_acl,json=copySourceAcl,proto3,oneof" json:"copy_source_acl,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` +} + +func (x *RestoreObjectRequest) Reset() { + *x = RestoreObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RestoreObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RestoreObjectRequest) ProtoMessage() {} + +func (x *RestoreObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RestoreObjectRequest.ProtoReflect.Descriptor instead. +func (*RestoreObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{14} +} + +func (x *RestoreObjectRequest) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *RestoreObjectRequest) GetObject() string { + if x != nil { + return x.Object + } + return "" +} + +func (x *RestoreObjectRequest) GetGeneration() int64 { + if x != nil { + return x.Generation + } + return 0 +} + +func (x *RestoreObjectRequest) GetIfGenerationMatch() int64 { + if x != nil && x.IfGenerationMatch != nil { + return *x.IfGenerationMatch + } + return 0 +} + +func (x *RestoreObjectRequest) GetIfGenerationNotMatch() int64 { + if x != nil && x.IfGenerationNotMatch != nil { + return *x.IfGenerationNotMatch + } + return 0 +} + +func (x *RestoreObjectRequest) GetIfMetagenerationMatch() int64 { + if x != nil && x.IfMetagenerationMatch != nil { + return *x.IfMetagenerationMatch + } + return 0 +} + +func (x *RestoreObjectRequest) GetIfMetagenerationNotMatch() int64 { + if x != nil && x.IfMetagenerationNotMatch != nil { + return *x.IfMetagenerationNotMatch + } + return 0 +} + +func (x *RestoreObjectRequest) GetCopySourceAcl() bool { + if x != nil && x.CopySourceAcl != nil { + return *x.CopySourceAcl + } + return false +} + +func (x *RestoreObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + // Message for canceling an in-progress resumable upload. // `upload_id` **must** be set. 
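The RestoreObjectRequest message above requires bucket, object, and generation, with optional generation/metageneration preconditions and copy_source_acl. A hypothetical sketch of calling the new RestoreObject RPC through the generated apiv2 client follows; restoreSoftDeleted and its parameters are illustrative names, and the import paths are the vendored internal packages, which are not importable outside this module.

package storagesketch

import (
	"context"

	storage "cloud.google.com/go/storage/internal/apiv2"
	storagepb "cloud.google.com/go/storage/internal/apiv2/storagepb"
)

// restoreSoftDeleted restores a single soft-deleted generation of an object.
// bucket, object and generation must all be set; copy_source_acl is optional
// and returns an error on buckets with uniform bucket-level access enabled.
func restoreSoftDeleted(ctx context.Context, c *storage.Client, bucket, object string, gen int64) (*storagepb.Object, error) {
	copyACL := false
	return c.RestoreObject(ctx, &storagepb.RestoreObjectRequest{
		Bucket:        bucket,
		Object:        object,
		Generation:    gen,
		CopySourceAcl: &copyACL, // optional *bool; omit to use the bucket's default object ACL
	})
}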
type CancelResumableWriteRequest struct { @@ -1292,7 +1423,7 @@ type CancelResumableWriteRequest struct { func (x *CancelResumableWriteRequest) Reset() { *x = CancelResumableWriteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[14] + mi := &file_google_storage_v2_storage_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1305,7 +1436,7 @@ func (x *CancelResumableWriteRequest) String() string { func (*CancelResumableWriteRequest) ProtoMessage() {} func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[14] + mi := &file_google_storage_v2_storage_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1318,7 +1449,7 @@ func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CancelResumableWriteRequest.ProtoReflect.Descriptor instead. func (*CancelResumableWriteRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{14} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15} } func (x *CancelResumableWriteRequest) GetUploadId() string { @@ -1339,7 +1470,7 @@ type CancelResumableWriteResponse struct { func (x *CancelResumableWriteResponse) Reset() { *x = CancelResumableWriteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[15] + mi := &file_google_storage_v2_storage_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1352,7 +1483,7 @@ func (x *CancelResumableWriteResponse) String() string { func (*CancelResumableWriteResponse) ProtoMessage() {} func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[15] + mi := &file_google_storage_v2_storage_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1365,7 +1496,7 @@ func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CancelResumableWriteResponse.ProtoReflect.Descriptor instead. func (*CancelResumableWriteResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16} } // Request message for ReadObject. @@ -1427,7 +1558,7 @@ type ReadObjectRequest struct { func (x *ReadObjectRequest) Reset() { *x = ReadObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[16] + mi := &file_google_storage_v2_storage_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1440,7 +1571,7 @@ func (x *ReadObjectRequest) String() string { func (*ReadObjectRequest) ProtoMessage() {} func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[16] + mi := &file_google_storage_v2_storage_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1453,7 +1584,7 @@ func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadObjectRequest.ProtoReflect.Descriptor instead. 
func (*ReadObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17} } func (x *ReadObjectRequest) GetBucket() string { @@ -1546,6 +1677,8 @@ type GetObjectRequest struct { // If present, selects a specific revision of this object (as opposed to the // latest version, the default). Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` + // If true, return the soft-deleted version of this object. + SoftDeleted *bool `protobuf:"varint,11,opt,name=soft_deleted,json=softDeleted,proto3,oneof" json:"soft_deleted,omitempty"` // Makes the operation conditional on whether the object's current generation // matches the given value. Setting to 0 makes the operation succeed only if // there are no live versions of the object. @@ -1573,7 +1706,7 @@ type GetObjectRequest struct { func (x *GetObjectRequest) Reset() { *x = GetObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[17] + mi := &file_google_storage_v2_storage_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1586,7 +1719,7 @@ func (x *GetObjectRequest) String() string { func (*GetObjectRequest) ProtoMessage() {} func (x *GetObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[17] + mi := &file_google_storage_v2_storage_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1599,7 +1732,7 @@ func (x *GetObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetObjectRequest.ProtoReflect.Descriptor instead. func (*GetObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18} } func (x *GetObjectRequest) GetBucket() string { @@ -1623,6 +1756,13 @@ func (x *GetObjectRequest) GetGeneration() int64 { return 0 } +func (x *GetObjectRequest) GetSoftDeleted() bool { + if x != nil && x.SoftDeleted != nil { + return *x.SoftDeleted + } + return false +} + func (x *GetObjectRequest) GetIfGenerationMatch() int64 { if x != nil && x.IfGenerationMatch != nil { return *x.IfGenerationMatch @@ -1692,7 +1832,7 @@ type ReadObjectResponse struct { func (x *ReadObjectResponse) Reset() { *x = ReadObjectResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[18] + mi := &file_google_storage_v2_storage_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1705,7 +1845,7 @@ func (x *ReadObjectResponse) String() string { func (*ReadObjectResponse) ProtoMessage() {} func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[18] + mi := &file_google_storage_v2_storage_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1718,7 +1858,7 @@ func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadObjectResponse.ProtoReflect.Descriptor instead. 
func (*ReadObjectResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19} } func (x *ReadObjectResponse) GetChecksummedData() *ChecksummedData { @@ -1789,7 +1929,7 @@ type WriteObjectSpec struct { func (x *WriteObjectSpec) Reset() { *x = WriteObjectSpec{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[19] + mi := &file_google_storage_v2_storage_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1802,7 +1942,7 @@ func (x *WriteObjectSpec) String() string { func (*WriteObjectSpec) ProtoMessage() {} func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[19] + mi := &file_google_storage_v2_storage_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1815,7 +1955,7 @@ func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectSpec.ProtoReflect.Descriptor instead. func (*WriteObjectSpec) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20} } func (x *WriteObjectSpec) GetResource() *Object { @@ -1917,7 +2057,7 @@ type WriteObjectRequest struct { func (x *WriteObjectRequest) Reset() { *x = WriteObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[20] + mi := &file_google_storage_v2_storage_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1930,7 +2070,7 @@ func (x *WriteObjectRequest) String() string { func (*WriteObjectRequest) ProtoMessage() {} func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[20] + mi := &file_google_storage_v2_storage_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1943,7 +2083,7 @@ func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectRequest.ProtoReflect.Descriptor instead. 
func (*WriteObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21} } func (m *WriteObjectRequest) GetFirstMessage() isWriteObjectRequest_FirstMessage { @@ -2058,7 +2198,7 @@ type WriteObjectResponse struct { func (x *WriteObjectResponse) Reset() { *x = WriteObjectResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[21] + mi := &file_google_storage_v2_storage_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2071,7 +2211,7 @@ func (x *WriteObjectResponse) String() string { func (*WriteObjectResponse) ProtoMessage() {} func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[21] + mi := &file_google_storage_v2_storage_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2084,7 +2224,7 @@ func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use WriteObjectResponse.ProtoReflect.Descriptor instead. func (*WriteObjectResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22} } func (m *WriteObjectResponse) GetWriteStatus() isWriteObjectResponse_WriteStatus { @@ -2128,6 +2268,294 @@ func (*WriteObjectResponse_PersistedSize) isWriteObjectResponse_WriteStatus() {} func (*WriteObjectResponse_Resource) isWriteObjectResponse_WriteStatus() {} +// Request message for BidiWriteObject. +type BidiWriteObjectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The first message of each stream should set one of the following. + // + // Types that are assignable to FirstMessage: + // *BidiWriteObjectRequest_UploadId + // *BidiWriteObjectRequest_WriteObjectSpec + FirstMessage isBidiWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"` + // Required. The offset from the beginning of the object at which the data + // should be written. + // + // In the first `WriteObjectRequest` of a `WriteObject()` action, it + // indicates the initial offset for the `Write()` call. The value **must** be + // equal to the `persisted_size` that a call to `QueryWriteStatus()` would + // return (0 if this is the first write to the object). + // + // On subsequent calls, this value **must** be no larger than the sum of the + // first `write_offset` and the sizes of all `data` chunks sent previously on + // this stream. + // + // An invalid value will cause an error. + WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"` + // A portion of the data for the object. + // + // Types that are assignable to Data: + // *BidiWriteObjectRequest_ChecksummedData + Data isBidiWriteObjectRequest_Data `protobuf_oneof:"data"` + // Checksums for the complete object. If the checksums computed by the service + // don't match the specified checksums the call will fail. May only be + // provided in the first or last request (either with first_message, or + // finish_write set). 
+ ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"` + // For each BidiWriteObjectRequest where state_lookup is `true` or the client + // closes the stream, the service will send a BidiWriteObjectResponse + // containing the current persisted size. The persisted size sent in responses + // covers all the bytes the server has persisted thus far and can be used to + // decide what data is safe for the client to drop. Note that the object's + // current size reported by the BidiWriteObjectResponse may lag behind the + // number of bytes written by the client. + StateLookup bool `protobuf:"varint,7,opt,name=state_lookup,json=stateLookup,proto3" json:"state_lookup,omitempty"` + // Persists data written on the stream, up to and including the current + // message, to permanent storage. This option should be used sparingly as it + // may reduce performance. Ongoing writes will periodically be persisted on + // the server even when `flush` is not set. + Flush bool `protobuf:"varint,8,opt,name=flush,proto3" json:"flush,omitempty"` + // If `true`, this indicates that the write is complete. Sending any + // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true` + // will cause an error. + // For a non-resumable write (where the upload_id was not set in the first + // message), it is an error not to set this field in the final message of the + // stream. + FinishWrite bool `protobuf:"varint,9,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"` + // A set of parameters common to Storage API requests concerning an object. + CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` +} + +func (x *BidiWriteObjectRequest) Reset() { + *x = BidiWriteObjectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BidiWriteObjectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BidiWriteObjectRequest) ProtoMessage() {} + +func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BidiWriteObjectRequest.ProtoReflect.Descriptor instead. 
+func (*BidiWriteObjectRequest) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23} +} + +func (m *BidiWriteObjectRequest) GetFirstMessage() isBidiWriteObjectRequest_FirstMessage { + if m != nil { + return m.FirstMessage + } + return nil +} + +func (x *BidiWriteObjectRequest) GetUploadId() string { + if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_UploadId); ok { + return x.UploadId + } + return "" +} + +func (x *BidiWriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec { + if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_WriteObjectSpec); ok { + return x.WriteObjectSpec + } + return nil +} + +func (x *BidiWriteObjectRequest) GetWriteOffset() int64 { + if x != nil { + return x.WriteOffset + } + return 0 +} + +func (m *BidiWriteObjectRequest) GetData() isBidiWriteObjectRequest_Data { + if m != nil { + return m.Data + } + return nil +} + +func (x *BidiWriteObjectRequest) GetChecksummedData() *ChecksummedData { + if x, ok := x.GetData().(*BidiWriteObjectRequest_ChecksummedData); ok { + return x.ChecksummedData + } + return nil +} + +func (x *BidiWriteObjectRequest) GetObjectChecksums() *ObjectChecksums { + if x != nil { + return x.ObjectChecksums + } + return nil +} + +func (x *BidiWriteObjectRequest) GetStateLookup() bool { + if x != nil { + return x.StateLookup + } + return false +} + +func (x *BidiWriteObjectRequest) GetFlush() bool { + if x != nil { + return x.Flush + } + return false +} + +func (x *BidiWriteObjectRequest) GetFinishWrite() bool { + if x != nil { + return x.FinishWrite + } + return false +} + +func (x *BidiWriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams { + if x != nil { + return x.CommonObjectRequestParams + } + return nil +} + +type isBidiWriteObjectRequest_FirstMessage interface { + isBidiWriteObjectRequest_FirstMessage() +} + +type BidiWriteObjectRequest_UploadId struct { + // For resumable uploads. This should be the `upload_id` returned from a + // call to `StartResumableWriteResponse`. + UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"` +} + +type BidiWriteObjectRequest_WriteObjectSpec struct { + // For non-resumable uploads. Describes the overall upload, including the + // destination bucket and object name, preconditions, etc. + WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"` +} + +func (*BidiWriteObjectRequest_UploadId) isBidiWriteObjectRequest_FirstMessage() {} + +func (*BidiWriteObjectRequest_WriteObjectSpec) isBidiWriteObjectRequest_FirstMessage() {} + +type isBidiWriteObjectRequest_Data interface { + isBidiWriteObjectRequest_Data() +} + +type BidiWriteObjectRequest_ChecksummedData struct { + // The data to insert. If a crc32c checksum is provided that doesn't match + // the checksum computed by the service, the request will fail. + ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"` +} + +func (*BidiWriteObjectRequest_ChecksummedData) isBidiWriteObjectRequest_Data() {} + +// Response message for BidiWriteObject. +type BidiWriteObjectResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The response will set one of the following. 
+ // + // Types that are assignable to WriteStatus: + // *BidiWriteObjectResponse_PersistedSize + // *BidiWriteObjectResponse_Resource + WriteStatus isBidiWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"` +} + +func (x *BidiWriteObjectResponse) Reset() { + *x = BidiWriteObjectResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BidiWriteObjectResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BidiWriteObjectResponse) ProtoMessage() {} + +func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BidiWriteObjectResponse.ProtoReflect.Descriptor instead. +func (*BidiWriteObjectResponse) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24} +} + +func (m *BidiWriteObjectResponse) GetWriteStatus() isBidiWriteObjectResponse_WriteStatus { + if m != nil { + return m.WriteStatus + } + return nil +} + +func (x *BidiWriteObjectResponse) GetPersistedSize() int64 { + if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_PersistedSize); ok { + return x.PersistedSize + } + return 0 +} + +func (x *BidiWriteObjectResponse) GetResource() *Object { + if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_Resource); ok { + return x.Resource + } + return nil +} + +type isBidiWriteObjectResponse_WriteStatus interface { + isBidiWriteObjectResponse_WriteStatus() +} + +type BidiWriteObjectResponse_PersistedSize struct { + // The total number of bytes that have been processed for the given object + // from all `WriteObject` calls. Only set if the upload has not finalized. + PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"` +} + +type BidiWriteObjectResponse_Resource struct { + // A resource containing the metadata for the uploaded object. Only set if + // the upload has finalized. + Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"` +} + +func (*BidiWriteObjectResponse_PersistedSize) isBidiWriteObjectResponse_WriteStatus() {} + +func (*BidiWriteObjectResponse_Resource) isBidiWriteObjectResponse_WriteStatus() {} + // Request message for ListObjects. type ListObjectsRequest struct { state protoimpl.MessageState @@ -2177,12 +2605,20 @@ type ListObjectsRequest struct { // listed have names between lexicographic_start (inclusive) and // lexicographic_end (exclusive). LexicographicEnd string `protobuf:"bytes,11,opt,name=lexicographic_end,json=lexicographicEnd,proto3" json:"lexicographic_end,omitempty"` + // Optional. If true, only list all soft-deleted versions of the object. + // Soft delete policy is required to set this option. + SoftDeleted bool `protobuf:"varint,12,opt,name=soft_deleted,json=softDeleted,proto3" json:"soft_deleted,omitempty"` + // Optional. Filter results to objects and prefixes that match this glob + // pattern. See [List Objects Using + // Glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob) + // for the full syntax. 
+ MatchGlob string `protobuf:"bytes,14,opt,name=match_glob,json=matchGlob,proto3" json:"match_glob,omitempty"` } func (x *ListObjectsRequest) Reset() { *x = ListObjectsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[22] + mi := &file_google_storage_v2_storage_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2195,7 +2631,7 @@ func (x *ListObjectsRequest) String() string { func (*ListObjectsRequest) ProtoMessage() {} func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[22] + mi := &file_google_storage_v2_storage_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2208,7 +2644,7 @@ func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead. func (*ListObjectsRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25} } func (x *ListObjectsRequest) GetParent() string { @@ -2281,6 +2717,20 @@ func (x *ListObjectsRequest) GetLexicographicEnd() string { return "" } +func (x *ListObjectsRequest) GetSoftDeleted() bool { + if x != nil { + return x.SoftDeleted + } + return false +} + +func (x *ListObjectsRequest) GetMatchGlob() string { + if x != nil { + return x.MatchGlob + } + return "" +} + // Request object for `QueryWriteStatus`. type QueryWriteStatusRequest struct { state protoimpl.MessageState @@ -2297,7 +2747,7 @@ type QueryWriteStatusRequest struct { func (x *QueryWriteStatusRequest) Reset() { *x = QueryWriteStatusRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[23] + mi := &file_google_storage_v2_storage_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2310,7 +2760,7 @@ func (x *QueryWriteStatusRequest) String() string { func (*QueryWriteStatusRequest) ProtoMessage() {} func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[23] + mi := &file_google_storage_v2_storage_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2323,7 +2773,7 @@ func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryWriteStatusRequest.ProtoReflect.Descriptor instead. 
func (*QueryWriteStatusRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26} } func (x *QueryWriteStatusRequest) GetUploadId() string { @@ -2357,7 +2807,7 @@ type QueryWriteStatusResponse struct { func (x *QueryWriteStatusResponse) Reset() { *x = QueryWriteStatusResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[24] + mi := &file_google_storage_v2_storage_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2370,7 +2820,7 @@ func (x *QueryWriteStatusResponse) String() string { func (*QueryWriteStatusResponse) ProtoMessage() {} func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[24] + mi := &file_google_storage_v2_storage_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2383,7 +2833,7 @@ func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use QueryWriteStatusResponse.ProtoReflect.Descriptor instead. func (*QueryWriteStatusResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27} } func (m *QueryWriteStatusResponse) GetWriteStatus() isQueryWriteStatusResponse_WriteStatus { @@ -2541,7 +2991,7 @@ type RewriteObjectRequest struct { func (x *RewriteObjectRequest) Reset() { *x = RewriteObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[25] + mi := &file_google_storage_v2_storage_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2554,7 +3004,7 @@ func (x *RewriteObjectRequest) String() string { func (*RewriteObjectRequest) ProtoMessage() {} func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[25] + mi := &file_google_storage_v2_storage_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2567,7 +3017,7 @@ func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RewriteObjectRequest.ProtoReflect.Descriptor instead. 
func (*RewriteObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28} } func (x *RewriteObjectRequest) GetDestinationName() string { @@ -2757,7 +3207,7 @@ type RewriteResponse struct { func (x *RewriteResponse) Reset() { *x = RewriteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[26] + mi := &file_google_storage_v2_storage_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2770,7 +3220,7 @@ func (x *RewriteResponse) String() string { func (*RewriteResponse) ProtoMessage() {} func (x *RewriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[26] + mi := &file_google_storage_v2_storage_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2783,7 +3233,7 @@ func (x *RewriteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RewriteResponse.ProtoReflect.Descriptor instead. func (*RewriteResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29} } func (x *RewriteResponse) GetTotalBytesRewritten() int64 { @@ -2842,7 +3292,7 @@ type StartResumableWriteRequest struct { func (x *StartResumableWriteRequest) Reset() { *x = StartResumableWriteRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[27] + mi := &file_google_storage_v2_storage_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2855,7 +3305,7 @@ func (x *StartResumableWriteRequest) String() string { func (*StartResumableWriteRequest) ProtoMessage() {} func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[27] + mi := &file_google_storage_v2_storage_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2868,7 +3318,7 @@ func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StartResumableWriteRequest.ProtoReflect.Descriptor instead. 
func (*StartResumableWriteRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30} } func (x *StartResumableWriteRequest) GetWriteObjectSpec() *WriteObjectSpec { @@ -2906,7 +3356,7 @@ type StartResumableWriteResponse struct { func (x *StartResumableWriteResponse) Reset() { *x = StartResumableWriteResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[28] + mi := &file_google_storage_v2_storage_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2919,7 +3369,7 @@ func (x *StartResumableWriteResponse) String() string { func (*StartResumableWriteResponse) ProtoMessage() {} func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[28] + mi := &file_google_storage_v2_storage_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2932,7 +3382,7 @@ func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StartResumableWriteResponse.ProtoReflect.Descriptor instead. func (*StartResumableWriteResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31} } func (x *StartResumableWriteResponse) GetUploadId() string { @@ -2989,7 +3439,7 @@ type UpdateObjectRequest struct { func (x *UpdateObjectRequest) Reset() { *x = UpdateObjectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[29] + mi := &file_google_storage_v2_storage_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3002,7 +3452,7 @@ func (x *UpdateObjectRequest) String() string { func (*UpdateObjectRequest) ProtoMessage() {} func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[29] + mi := &file_google_storage_v2_storage_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3015,7 +3465,7 @@ func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateObjectRequest.ProtoReflect.Descriptor instead. 
func (*UpdateObjectRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32} } func (x *UpdateObjectRequest) GetObject() *Object { @@ -3088,7 +3538,7 @@ type GetServiceAccountRequest struct { func (x *GetServiceAccountRequest) Reset() { *x = GetServiceAccountRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[30] + mi := &file_google_storage_v2_storage_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3101,7 +3551,7 @@ func (x *GetServiceAccountRequest) String() string { func (*GetServiceAccountRequest) ProtoMessage() {} func (x *GetServiceAccountRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[30] + mi := &file_google_storage_v2_storage_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3114,7 +3564,7 @@ func (x *GetServiceAccountRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetServiceAccountRequest.ProtoReflect.Descriptor instead. func (*GetServiceAccountRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33} } func (x *GetServiceAccountRequest) GetProject() string { @@ -3141,7 +3591,7 @@ type CreateHmacKeyRequest struct { func (x *CreateHmacKeyRequest) Reset() { *x = CreateHmacKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[31] + mi := &file_google_storage_v2_storage_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3154,7 +3604,7 @@ func (x *CreateHmacKeyRequest) String() string { func (*CreateHmacKeyRequest) ProtoMessage() {} func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[31] + mi := &file_google_storage_v2_storage_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3167,7 +3617,7 @@ func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateHmacKeyRequest.ProtoReflect.Descriptor instead. 
func (*CreateHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34} } func (x *CreateHmacKeyRequest) GetProject() string { @@ -3200,7 +3650,7 @@ type CreateHmacKeyResponse struct { func (x *CreateHmacKeyResponse) Reset() { *x = CreateHmacKeyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[32] + mi := &file_google_storage_v2_storage_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3213,7 +3663,7 @@ func (x *CreateHmacKeyResponse) String() string { func (*CreateHmacKeyResponse) ProtoMessage() {} func (x *CreateHmacKeyResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[32] + mi := &file_google_storage_v2_storage_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3226,7 +3676,7 @@ func (x *CreateHmacKeyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateHmacKeyResponse.ProtoReflect.Descriptor instead. func (*CreateHmacKeyResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35} } func (x *CreateHmacKeyResponse) GetMetadata() *HmacKeyMetadata { @@ -3260,7 +3710,7 @@ type DeleteHmacKeyRequest struct { func (x *DeleteHmacKeyRequest) Reset() { *x = DeleteHmacKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[33] + mi := &file_google_storage_v2_storage_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3273,7 +3723,7 @@ func (x *DeleteHmacKeyRequest) String() string { func (*DeleteHmacKeyRequest) ProtoMessage() {} func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[33] + mi := &file_google_storage_v2_storage_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3286,7 +3736,7 @@ func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteHmacKeyRequest.ProtoReflect.Descriptor instead. 
func (*DeleteHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36} } func (x *DeleteHmacKeyRequest) GetAccessId() string { @@ -3320,7 +3770,7 @@ type GetHmacKeyRequest struct { func (x *GetHmacKeyRequest) Reset() { *x = GetHmacKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[34] + mi := &file_google_storage_v2_storage_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3333,7 +3783,7 @@ func (x *GetHmacKeyRequest) String() string { func (*GetHmacKeyRequest) ProtoMessage() {} func (x *GetHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[34] + mi := &file_google_storage_v2_storage_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3346,7 +3796,7 @@ func (x *GetHmacKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetHmacKeyRequest.ProtoReflect.Descriptor instead. func (*GetHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37} } func (x *GetHmacKeyRequest) GetAccessId() string { @@ -3386,7 +3836,7 @@ type ListHmacKeysRequest struct { func (x *ListHmacKeysRequest) Reset() { *x = ListHmacKeysRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[35] + mi := &file_google_storage_v2_storage_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3399,7 +3849,7 @@ func (x *ListHmacKeysRequest) String() string { func (*ListHmacKeysRequest) ProtoMessage() {} func (x *ListHmacKeysRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[35] + mi := &file_google_storage_v2_storage_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3412,7 +3862,7 @@ func (x *ListHmacKeysRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListHmacKeysRequest.ProtoReflect.Descriptor instead. 
func (*ListHmacKeysRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38} } func (x *ListHmacKeysRequest) GetProject() string { @@ -3466,7 +3916,7 @@ type ListHmacKeysResponse struct { func (x *ListHmacKeysResponse) Reset() { *x = ListHmacKeysResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[36] + mi := &file_google_storage_v2_storage_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3479,7 +3929,7 @@ func (x *ListHmacKeysResponse) String() string { func (*ListHmacKeysResponse) ProtoMessage() {} func (x *ListHmacKeysResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[36] + mi := &file_google_storage_v2_storage_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3492,7 +3942,7 @@ func (x *ListHmacKeysResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListHmacKeysResponse.ProtoReflect.Descriptor instead. func (*ListHmacKeysResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39} } func (x *ListHmacKeysResponse) GetHmacKeys() []*HmacKeyMetadata { @@ -3532,7 +3982,7 @@ type UpdateHmacKeyRequest struct { func (x *UpdateHmacKeyRequest) Reset() { *x = UpdateHmacKeyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[37] + mi := &file_google_storage_v2_storage_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3545,7 +3995,7 @@ func (x *UpdateHmacKeyRequest) String() string { func (*UpdateHmacKeyRequest) ProtoMessage() {} func (x *UpdateHmacKeyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[37] + mi := &file_google_storage_v2_storage_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3558,7 +4008,7 @@ func (x *UpdateHmacKeyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use UpdateHmacKeyRequest.ProtoReflect.Descriptor instead. 
func (*UpdateHmacKeyRequest) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40} } func (x *UpdateHmacKeyRequest) GetHmacKey() *HmacKeyMetadata { @@ -3595,7 +4045,7 @@ type CommonObjectRequestParams struct { func (x *CommonObjectRequestParams) Reset() { *x = CommonObjectRequestParams{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[38] + mi := &file_google_storage_v2_storage_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3608,7 +4058,7 @@ func (x *CommonObjectRequestParams) String() string { func (*CommonObjectRequestParams) ProtoMessage() {} func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[38] + mi := &file_google_storage_v2_storage_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3621,7 +4071,7 @@ func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message { // Deprecated: Use CommonObjectRequestParams.ProtoReflect.Descriptor instead. func (*CommonObjectRequestParams) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{41} } func (x *CommonObjectRequestParams) GetEncryptionAlgorithm() string { @@ -3655,7 +4105,7 @@ type ServiceConstants struct { func (x *ServiceConstants) Reset() { *x = ServiceConstants{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[39] + mi := &file_google_storage_v2_storage_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3668,7 +4118,7 @@ func (x *ServiceConstants) String() string { func (*ServiceConstants) ProtoMessage() {} func (x *ServiceConstants) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[39] + mi := &file_google_storage_v2_storage_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3681,7 +4131,7 @@ func (x *ServiceConstants) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceConstants.ProtoReflect.Descriptor instead. func (*ServiceConstants) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42} } // A bucket. @@ -3706,8 +4156,6 @@ type Bucket struct { // {projectIdentifier} can be the project ID or project number. Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"` // Output only. The metadata generation of this bucket. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. Metageneration int64 `protobuf:"varint,4,opt,name=metageneration,proto3" json:"metageneration,omitempty"` // Immutable. The location of the bucket. Object data for objects in the // bucket resides in physical storage within this region. Defaults to `US`. @@ -3731,7 +4179,7 @@ type Bucket struct { // replication. "ASYNC_TURBO" enables turbo replication, valid for dual-region // buckets only. If rpo is not specified when the bucket is created, it // defaults to "DEFAULT". 
For more information, see - // https://cloud.google.com/storage/docs/turbo-replication. + // https://cloud.google.com/storage/docs/availability-durability#turbo-replication. Rpo string `protobuf:"bytes,27,opt,name=rpo,proto3" json:"rpo,omitempty"` // Access controls on the bucket. // If iam_config.uniform_bucket_level_access is enabled on this bucket, @@ -3746,15 +4194,11 @@ type Bucket struct { // for more information. Lifecycle *Bucket_Lifecycle `protobuf:"bytes,10,opt,name=lifecycle,proto3" json:"lifecycle,omitempty"` // Output only. The creation time of the bucket. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. CreateTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` // The bucket's [https://www.w3.org/TR/cors/][Cross-Origin Resource Sharing] // (CORS) config. Cors []*Bucket_Cors `protobuf:"bytes,12,rep,name=cors,proto3" json:"cors,omitempty"` // Output only. The modification time of the bucket. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. UpdateTime *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` // The default value for event-based hold on newly created objects in this // bucket. Event-based hold is a way to retain objects indefinitely until an @@ -3809,12 +4253,15 @@ type Bucket struct { // The bucket's Autoclass configuration. If there is no configuration, the // Autoclass feature will be disabled and have no effect on the bucket. Autoclass *Bucket_Autoclass `protobuf:"bytes,28,opt,name=autoclass,proto3" json:"autoclass,omitempty"` + // Optional. The bucket's soft delete policy. The soft delete policy prevents + // soft-deleted objects from being permanently deleted. + SoftDeletePolicy *Bucket_SoftDeletePolicy `protobuf:"bytes,31,opt,name=soft_delete_policy,json=softDeletePolicy,proto3" json:"soft_delete_policy,omitempty"` } func (x *Bucket) Reset() { *x = Bucket{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[40] + mi := &file_google_storage_v2_storage_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3827,7 +4274,7 @@ func (x *Bucket) String() string { func (*Bucket) ProtoMessage() {} func (x *Bucket) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[40] + mi := &file_google_storage_v2_storage_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3840,7 +4287,7 @@ func (x *Bucket) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket.ProtoReflect.Descriptor instead. func (*Bucket) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43} } func (x *Bucket) GetName() string { @@ -4039,6 +4486,13 @@ func (x *Bucket) GetAutoclass() *Bucket_Autoclass { return nil } +func (x *Bucket) GetSoftDeletePolicy() *Bucket_SoftDeletePolicy { + if x != nil { + return x.SoftDeletePolicy + } + return nil +} + // An access-control entry. 
type BucketAccessControl struct { state protoimpl.MessageState @@ -4089,7 +4543,7 @@ type BucketAccessControl struct { func (x *BucketAccessControl) Reset() { *x = BucketAccessControl{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[41] + mi := &file_google_storage_v2_storage_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4102,7 +4556,7 @@ func (x *BucketAccessControl) String() string { func (*BucketAccessControl) ProtoMessage() {} func (x *BucketAccessControl) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[41] + mi := &file_google_storage_v2_storage_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4115,7 +4569,7 @@ func (x *BucketAccessControl) ProtoReflect() protoreflect.Message { // Deprecated: Use BucketAccessControl.ProtoReflect.Descriptor instead. func (*BucketAccessControl) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{41} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{44} } func (x *BucketAccessControl) GetRole() string { @@ -4188,7 +4642,7 @@ type ChecksummedData struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The data. + // Optional. The data. Content []byte `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"` // If set, the CRC32C digest of the content field. Crc32C *uint32 `protobuf:"fixed32,2,opt,name=crc32c,proto3,oneof" json:"crc32c,omitempty"` @@ -4197,7 +4651,7 @@ type ChecksummedData struct { func (x *ChecksummedData) Reset() { *x = ChecksummedData{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[42] + mi := &file_google_storage_v2_storage_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4210,7 +4664,7 @@ func (x *ChecksummedData) String() string { func (*ChecksummedData) ProtoMessage() {} func (x *ChecksummedData) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[42] + mi := &file_google_storage_v2_storage_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4223,7 +4677,7 @@ func (x *ChecksummedData) ProtoReflect() protoreflect.Message { // Deprecated: Use ChecksummedData.ProtoReflect.Descriptor instead. 
func (*ChecksummedData) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{42} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{45} } func (x *ChecksummedData) GetContent() []byte { @@ -4264,7 +4718,7 @@ type ObjectChecksums struct { func (x *ObjectChecksums) Reset() { *x = ObjectChecksums{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[43] + mi := &file_google_storage_v2_storage_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4277,7 +4731,7 @@ func (x *ObjectChecksums) String() string { func (*ObjectChecksums) ProtoMessage() {} func (x *ObjectChecksums) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[43] + mi := &file_google_storage_v2_storage_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4290,7 +4744,7 @@ func (x *ObjectChecksums) ProtoReflect() protoreflect.Message { // Deprecated: Use ObjectChecksums.ProtoReflect.Descriptor instead. func (*ObjectChecksums) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46} } func (x *ObjectChecksums) GetCrc32C() uint32 { @@ -4339,7 +4793,7 @@ type HmacKeyMetadata struct { func (x *HmacKeyMetadata) Reset() { *x = HmacKeyMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[44] + mi := &file_google_storage_v2_storage_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4352,7 +4806,7 @@ func (x *HmacKeyMetadata) String() string { func (*HmacKeyMetadata) ProtoMessage() {} func (x *HmacKeyMetadata) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[44] + mi := &file_google_storage_v2_storage_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4365,7 +4819,7 @@ func (x *HmacKeyMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use HmacKeyMetadata.ProtoReflect.Descriptor instead. func (*HmacKeyMetadata) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{44} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47} } func (x *HmacKeyMetadata) GetId() string { @@ -4459,7 +4913,7 @@ type NotificationConfig struct { func (x *NotificationConfig) Reset() { *x = NotificationConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[45] + mi := &file_google_storage_v2_storage_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4472,7 +4926,7 @@ func (x *NotificationConfig) String() string { func (*NotificationConfig) ProtoMessage() {} func (x *NotificationConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[45] + mi := &file_google_storage_v2_storage_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4485,7 +4939,7 @@ func (x *NotificationConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use NotificationConfig.ProtoReflect.Descriptor instead. 
func (*NotificationConfig) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{45} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48} } func (x *NotificationConfig) GetName() string { @@ -4554,7 +5008,7 @@ type CustomerEncryption struct { func (x *CustomerEncryption) Reset() { *x = CustomerEncryption{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[46] + mi := &file_google_storage_v2_storage_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4567,7 +5021,7 @@ func (x *CustomerEncryption) String() string { func (*CustomerEncryption) ProtoMessage() {} func (x *CustomerEncryption) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[46] + mi := &file_google_storage_v2_storage_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4580,7 +5034,7 @@ func (x *CustomerEncryption) ProtoReflect() protoreflect.Message { // Deprecated: Use CustomerEncryption.ProtoReflect.Descriptor instead. func (*CustomerEncryption) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49} } func (x *CustomerEncryption) GetEncryptionAlgorithm() string { @@ -4619,21 +5073,17 @@ type Object struct { // object. Etag string `protobuf:"bytes,27,opt,name=etag,proto3" json:"etag,omitempty"` // Immutable. The content generation of this object. Used for object - // versioning. Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + // versioning. Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"` // Output only. The version of the metadata for this generation of this // object. Used for preconditions and for detecting changes in metadata. A // metageneration number is only meaningful in the context of a particular - // generation of a particular object. Attempting to set or update this field - // will result in a [FieldViolation][google.rpc.BadRequest.FieldViolation]. + // generation of a particular object. Metageneration int64 `protobuf:"varint,4,opt,name=metageneration,proto3" json:"metageneration,omitempty"` // Storage class of the object. StorageClass string `protobuf:"bytes,5,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` // Output only. Content-Length of the object data in bytes, matching // [https://tools.ietf.org/html/rfc7230#section-3.3.2][RFC 7230 §3.3.2]. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. Size int64 `protobuf:"varint,6,opt,name=size,proto3" json:"size,omitempty"` // Content-Encoding of the object data, matching // [https://tools.ietf.org/html/rfc7231#section-3.1.2.2][RFC 7231 §3.1.2.2] @@ -4654,8 +5104,7 @@ type Object struct { // [https://tools.ietf.org/html/rfc7231#section-3.1.3.2][RFC 7231 §3.1.3.2]. ContentLanguage string `protobuf:"bytes,11,opt,name=content_language,json=contentLanguage,proto3" json:"content_language,omitempty"` // Output only. If this object is noncurrent, this is the time when the object - // became noncurrent. Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + // became noncurrent. 
DeleteTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"` // Content-Type of the object data, matching // [https://tools.ietf.org/html/rfc7231#section-3.1.1.5][RFC 7231 §3.1.1.5]. @@ -4663,13 +5112,9 @@ type Object struct { // `application/octet-stream`. ContentType string `protobuf:"bytes,13,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` // Output only. The creation time of the object. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. CreateTime *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` // Output only. Number of underlying components that make up this object. - // Components are accumulated by compose operations. Attempting to set or - // update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + // Components are accumulated by compose operations. ComponentCount int32 `protobuf:"varint,15,opt,name=component_count,json=componentCount,proto3" json:"component_count,omitempty"` // Output only. Hashes for the data part of this object. This field is used // for output only and will be silently ignored if provided in requests. @@ -4680,16 +5125,12 @@ type Object struct { // such as modifying custom metadata, as well as changes made by Cloud Storage // on behalf of a requester, such as changing the storage class based on an // Object Lifecycle Configuration. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. UpdateTime *timestamppb.Timestamp `protobuf:"bytes,17,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` // Cloud KMS Key used to encrypt this object, if the object is encrypted by // such a key. KmsKey string `protobuf:"bytes,18,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"` // Output only. The time at which the object's storage class was last changed. // When the object is initially created, it will be set to time_created. - // Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. UpdateStorageClassTime *timestamppb.Timestamp `protobuf:"bytes,19,opt,name=update_storage_class_time,json=updateStorageClassTime,proto3" json:"update_storage_class_time,omitempty"` // Whether an object is under temporary hold. While this flag is set to true, // the object is protected against deletion and overwrites. A common use case @@ -4720,8 +5161,7 @@ type Object struct { // In a response, this field will always be set to true or false. EventBasedHold *bool `protobuf:"varint,23,opt,name=event_based_hold,json=eventBasedHold,proto3,oneof" json:"event_based_hold,omitempty"` // Output only. The owner of the object. This will always be the uploader of - // the object. Attempting to set or update this field will result in a - // [FieldViolation][google.rpc.BadRequest.FieldViolation]. + // the object. Owner *Owner `protobuf:"bytes,24,opt,name=owner,proto3" json:"owner,omitempty"` // Metadata of Customer-Supplied Encryption Key, if the object is encrypted by // such a key. 
@@ -4733,7 +5173,7 @@ type Object struct { func (x *Object) Reset() { *x = Object{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[47] + mi := &file_google_storage_v2_storage_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4746,7 +5186,7 @@ func (x *Object) String() string { func (*Object) ProtoMessage() {} func (x *Object) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[47] + mi := &file_google_storage_v2_storage_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4759,7 +5199,7 @@ func (x *Object) ProtoReflect() protoreflect.Message { // Deprecated: Use Object.ProtoReflect.Descriptor instead. func (*Object) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50} } func (x *Object) GetName() string { @@ -5001,7 +5441,7 @@ type ObjectAccessControl struct { func (x *ObjectAccessControl) Reset() { *x = ObjectAccessControl{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[48] + mi := &file_google_storage_v2_storage_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5014,7 +5454,7 @@ func (x *ObjectAccessControl) String() string { func (*ObjectAccessControl) ProtoMessage() {} func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[48] + mi := &file_google_storage_v2_storage_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5027,7 +5467,7 @@ func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message { // Deprecated: Use ObjectAccessControl.ProtoReflect.Descriptor instead. func (*ObjectAccessControl) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51} } func (x *ObjectAccessControl) GetRole() string { @@ -5112,7 +5552,7 @@ type ListObjectsResponse struct { func (x *ListObjectsResponse) Reset() { *x = ListObjectsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[49] + mi := &file_google_storage_v2_storage_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5125,7 +5565,7 @@ func (x *ListObjectsResponse) String() string { func (*ListObjectsResponse) ProtoMessage() {} func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[49] + mi := &file_google_storage_v2_storage_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5138,7 +5578,7 @@ func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListObjectsResponse.ProtoReflect.Descriptor instead. 
func (*ListObjectsResponse) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52} } func (x *ListObjectsResponse) GetObjects() []*Object { @@ -5177,7 +5617,7 @@ type ProjectTeam struct { func (x *ProjectTeam) Reset() { *x = ProjectTeam{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[50] + mi := &file_google_storage_v2_storage_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5190,7 +5630,7 @@ func (x *ProjectTeam) String() string { func (*ProjectTeam) ProtoMessage() {} func (x *ProjectTeam) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[50] + mi := &file_google_storage_v2_storage_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5203,7 +5643,7 @@ func (x *ProjectTeam) ProtoReflect() protoreflect.Message { // Deprecated: Use ProjectTeam.ProtoReflect.Descriptor instead. func (*ProjectTeam) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53} } func (x *ProjectTeam) GetProjectNumber() string { @@ -5235,7 +5675,7 @@ type ServiceAccount struct { func (x *ServiceAccount) Reset() { *x = ServiceAccount{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[51] + mi := &file_google_storage_v2_storage_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5248,7 +5688,7 @@ func (x *ServiceAccount) String() string { func (*ServiceAccount) ProtoMessage() {} func (x *ServiceAccount) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[51] + mi := &file_google_storage_v2_storage_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5261,7 +5701,7 @@ func (x *ServiceAccount) ProtoReflect() protoreflect.Message { // Deprecated: Use ServiceAccount.ProtoReflect.Descriptor instead. func (*ServiceAccount) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{54} } func (x *ServiceAccount) GetEmailAddress() string { @@ -5286,7 +5726,7 @@ type Owner struct { func (x *Owner) Reset() { *x = Owner{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[52] + mi := &file_google_storage_v2_storage_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5299,7 +5739,7 @@ func (x *Owner) String() string { func (*Owner) ProtoMessage() {} func (x *Owner) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[52] + mi := &file_google_storage_v2_storage_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5312,7 +5752,7 @@ func (x *Owner) ProtoReflect() protoreflect.Message { // Deprecated: Use Owner.ProtoReflect.Descriptor instead. 
func (*Owner) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{55} } func (x *Owner) GetEntity() string { @@ -5346,7 +5786,7 @@ type ContentRange struct { func (x *ContentRange) Reset() { *x = ContentRange{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[53] + mi := &file_google_storage_v2_storage_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5359,7 +5799,7 @@ func (x *ContentRange) String() string { func (*ContentRange) ProtoMessage() {} func (x *ContentRange) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[53] + mi := &file_google_storage_v2_storage_proto_msgTypes[56] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5372,7 +5812,7 @@ func (x *ContentRange) ProtoReflect() protoreflect.Message { // Deprecated: Use ContentRange.ProtoReflect.Descriptor instead. func (*ContentRange) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{56} } func (x *ContentRange) GetStart() int64 { @@ -5414,7 +5854,7 @@ type ComposeObjectRequest_SourceObject struct { func (x *ComposeObjectRequest_SourceObject) Reset() { *x = ComposeObjectRequest_SourceObject{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[54] + mi := &file_google_storage_v2_storage_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5427,7 +5867,7 @@ func (x *ComposeObjectRequest_SourceObject) String() string { func (*ComposeObjectRequest_SourceObject) ProtoMessage() {} func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[54] + mi := &file_google_storage_v2_storage_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5479,7 +5919,7 @@ type ComposeObjectRequest_SourceObject_ObjectPreconditions struct { func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) Reset() { *x = ComposeObjectRequest_SourceObject_ObjectPreconditions{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[55] + mi := &file_google_storage_v2_storage_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5492,7 +5932,7 @@ func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) String() string func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoMessage() {} func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[55] + mi := &file_google_storage_v2_storage_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5528,7 +5968,7 @@ type Bucket_Billing struct { func (x *Bucket_Billing) Reset() { *x = Bucket_Billing{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[56] + mi := &file_google_storage_v2_storage_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5541,7 
+5981,7 @@ func (x *Bucket_Billing) String() string { func (*Bucket_Billing) ProtoMessage() {} func (x *Bucket_Billing) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[56] + mi := &file_google_storage_v2_storage_proto_msgTypes[59] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5554,7 +5994,7 @@ func (x *Bucket_Billing) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Billing.ProtoReflect.Descriptor instead. func (*Bucket_Billing) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 0} } func (x *Bucket_Billing) GetRequesterPays() bool { @@ -5594,7 +6034,7 @@ type Bucket_Cors struct { func (x *Bucket_Cors) Reset() { *x = Bucket_Cors{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[57] + mi := &file_google_storage_v2_storage_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5607,7 +6047,7 @@ func (x *Bucket_Cors) String() string { func (*Bucket_Cors) ProtoMessage() {} func (x *Bucket_Cors) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[57] + mi := &file_google_storage_v2_storage_proto_msgTypes[60] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5620,7 +6060,7 @@ func (x *Bucket_Cors) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Cors.ProtoReflect.Descriptor instead. func (*Bucket_Cors) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 1} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 1} } func (x *Bucket_Cors) GetOrigin() []string { @@ -5665,7 +6105,7 @@ type Bucket_Encryption struct { func (x *Bucket_Encryption) Reset() { *x = Bucket_Encryption{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[58] + mi := &file_google_storage_v2_storage_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5678,7 +6118,7 @@ func (x *Bucket_Encryption) String() string { func (*Bucket_Encryption) ProtoMessage() {} func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[58] + mi := &file_google_storage_v2_storage_proto_msgTypes[61] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5691,7 +6131,7 @@ func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Encryption.ProtoReflect.Descriptor instead. 
func (*Bucket_Encryption) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 2} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 2} } func (x *Bucket_Encryption) GetDefaultKmsKey() string { @@ -5717,7 +6157,7 @@ type Bucket_IamConfig struct { func (x *Bucket_IamConfig) Reset() { *x = Bucket_IamConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[59] + mi := &file_google_storage_v2_storage_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5730,7 +6170,7 @@ func (x *Bucket_IamConfig) String() string { func (*Bucket_IamConfig) ProtoMessage() {} func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[59] + mi := &file_google_storage_v2_storage_proto_msgTypes[62] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5743,7 +6183,7 @@ func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_IamConfig.ProtoReflect.Descriptor instead. func (*Bucket_IamConfig) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 3} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 3} } func (x *Bucket_IamConfig) GetUniformBucketLevelAccess() *Bucket_IamConfig_UniformBucketLevelAccess { @@ -5775,7 +6215,7 @@ type Bucket_Lifecycle struct { func (x *Bucket_Lifecycle) Reset() { *x = Bucket_Lifecycle{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[60] + mi := &file_google_storage_v2_storage_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5788,7 +6228,7 @@ func (x *Bucket_Lifecycle) String() string { func (*Bucket_Lifecycle) ProtoMessage() {} func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[60] + mi := &file_google_storage_v2_storage_proto_msgTypes[63] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5801,7 +6241,7 @@ func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle.ProtoReflect.Descriptor instead. 
func (*Bucket_Lifecycle) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4} } func (x *Bucket_Lifecycle) GetRule() []*Bucket_Lifecycle_Rule { @@ -5827,7 +6267,7 @@ type Bucket_Logging struct { func (x *Bucket_Logging) Reset() { *x = Bucket_Logging{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[61] + mi := &file_google_storage_v2_storage_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5840,7 +6280,7 @@ func (x *Bucket_Logging) String() string { func (*Bucket_Logging) ProtoMessage() {} func (x *Bucket_Logging) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[61] + mi := &file_google_storage_v2_storage_proto_msgTypes[64] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5853,7 +6293,7 @@ func (x *Bucket_Logging) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Logging.ProtoReflect.Descriptor instead. func (*Bucket_Logging) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 5} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 5} } func (x *Bucket_Logging) GetLogBucket() string { @@ -5892,7 +6332,7 @@ type Bucket_RetentionPolicy struct { func (x *Bucket_RetentionPolicy) Reset() { *x = Bucket_RetentionPolicy{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[62] + mi := &file_google_storage_v2_storage_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5905,7 +6345,7 @@ func (x *Bucket_RetentionPolicy) String() string { func (*Bucket_RetentionPolicy) ProtoMessage() {} func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[62] + mi := &file_google_storage_v2_storage_proto_msgTypes[65] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5918,7 +6358,7 @@ func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_RetentionPolicy.ProtoReflect.Descriptor instead. func (*Bucket_RetentionPolicy) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 6} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 6} } func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamppb.Timestamp { @@ -5942,6 +6382,66 @@ func (x *Bucket_RetentionPolicy) GetRetentionDuration() *durationpb.Duration { return nil } +// Soft delete policy properties of a bucket. +type Bucket_SoftDeletePolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The period of time that soft-deleted objects in the bucket must be + // retained and cannot be permanently deleted. The duration must be greater + // than or equal to 7 days and less than 1 year. + RetentionDuration *durationpb.Duration `protobuf:"bytes,1,opt,name=retention_duration,json=retentionDuration,proto3,oneof" json:"retention_duration,omitempty"` + // Time from which the policy was effective. This is service-provided. 
+ EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=effective_time,json=effectiveTime,proto3,oneof" json:"effective_time,omitempty"` +} + +func (x *Bucket_SoftDeletePolicy) Reset() { + *x = Bucket_SoftDeletePolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_storage_v2_storage_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Bucket_SoftDeletePolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Bucket_SoftDeletePolicy) ProtoMessage() {} + +func (x *Bucket_SoftDeletePolicy) ProtoReflect() protoreflect.Message { + mi := &file_google_storage_v2_storage_proto_msgTypes[66] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Bucket_SoftDeletePolicy.ProtoReflect.Descriptor instead. +func (*Bucket_SoftDeletePolicy) Descriptor() ([]byte, []int) { + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 7} +} + +func (x *Bucket_SoftDeletePolicy) GetRetentionDuration() *durationpb.Duration { + if x != nil { + return x.RetentionDuration + } + return nil +} + +func (x *Bucket_SoftDeletePolicy) GetEffectiveTime() *timestamppb.Timestamp { + if x != nil { + return x.EffectiveTime + } + return nil +} + // Properties of a bucket related to versioning. // For more on Cloud Storage versioning, see // https://cloud.google.com/storage/docs/object-versioning. @@ -5957,7 +6457,7 @@ type Bucket_Versioning struct { func (x *Bucket_Versioning) Reset() { *x = Bucket_Versioning{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[63] + mi := &file_google_storage_v2_storage_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5970,7 +6470,7 @@ func (x *Bucket_Versioning) String() string { func (*Bucket_Versioning) ProtoMessage() {} func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[63] + mi := &file_google_storage_v2_storage_proto_msgTypes[67] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5983,7 +6483,7 @@ func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Versioning.ProtoReflect.Descriptor instead. 
func (*Bucket_Versioning) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 7} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 8} } func (x *Bucket_Versioning) GetEnabled() bool { @@ -6017,7 +6517,7 @@ type Bucket_Website struct { func (x *Bucket_Website) Reset() { *x = Bucket_Website{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[64] + mi := &file_google_storage_v2_storage_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6030,7 +6530,7 @@ func (x *Bucket_Website) String() string { func (*Bucket_Website) ProtoMessage() {} func (x *Bucket_Website) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[64] + mi := &file_google_storage_v2_storage_proto_msgTypes[68] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6043,7 +6543,7 @@ func (x *Bucket_Website) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Website.ProtoReflect.Descriptor instead. func (*Bucket_Website) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 8} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 9} } func (x *Bucket_Website) GetMainPageSuffix() string { @@ -6075,7 +6575,7 @@ type Bucket_CustomPlacementConfig struct { func (x *Bucket_CustomPlacementConfig) Reset() { *x = Bucket_CustomPlacementConfig{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[65] + mi := &file_google_storage_v2_storage_proto_msgTypes[69] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6088,7 +6588,7 @@ func (x *Bucket_CustomPlacementConfig) String() string { func (*Bucket_CustomPlacementConfig) ProtoMessage() {} func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[65] + mi := &file_google_storage_v2_storage_proto_msgTypes[69] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6101,7 +6601,7 @@ func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_CustomPlacementConfig.ProtoReflect.Descriptor instead. func (*Bucket_CustomPlacementConfig) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 9} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 10} } func (x *Bucket_CustomPlacementConfig) GetDataLocations() []string { @@ -6124,12 +6624,19 @@ type Bucket_Autoclass struct { // Autoclass is enabled when the bucket is created, the toggle_time is set // to the bucket creation time. ToggleTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=toggle_time,json=toggleTime,proto3" json:"toggle_time,omitempty"` + // An object in an Autoclass bucket will eventually cool down to the + // terminal storage class if there is no access to the object. + // The only valid values are NEARLINE and ARCHIVE. + TerminalStorageClass *string `protobuf:"bytes,3,opt,name=terminal_storage_class,json=terminalStorageClass,proto3,oneof" json:"terminal_storage_class,omitempty"` + // Output only. Latest instant at which the autoclass terminal storage class + // was updated. 
+ TerminalStorageClassUpdateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=terminal_storage_class_update_time,json=terminalStorageClassUpdateTime,proto3,oneof" json:"terminal_storage_class_update_time,omitempty"` } func (x *Bucket_Autoclass) Reset() { *x = Bucket_Autoclass{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[66] + mi := &file_google_storage_v2_storage_proto_msgTypes[70] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6142,7 +6649,7 @@ func (x *Bucket_Autoclass) String() string { func (*Bucket_Autoclass) ProtoMessage() {} func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[66] + mi := &file_google_storage_v2_storage_proto_msgTypes[70] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6155,7 +6662,7 @@ func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Autoclass.ProtoReflect.Descriptor instead. func (*Bucket_Autoclass) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 10} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 11} } func (x *Bucket_Autoclass) GetEnabled() bool { @@ -6172,6 +6679,20 @@ func (x *Bucket_Autoclass) GetToggleTime() *timestamppb.Timestamp { return nil } +func (x *Bucket_Autoclass) GetTerminalStorageClass() string { + if x != nil && x.TerminalStorageClass != nil { + return *x.TerminalStorageClass + } + return "" +} + +func (x *Bucket_Autoclass) GetTerminalStorageClassUpdateTime() *timestamppb.Timestamp { + if x != nil { + return x.TerminalStorageClassUpdateTime + } + return nil +} + // Settings for Uniform Bucket level access. // See https://cloud.google.com/storage/docs/uniform-bucket-level-access. type Bucket_IamConfig_UniformBucketLevelAccess struct { @@ -6191,7 +6712,7 @@ type Bucket_IamConfig_UniformBucketLevelAccess struct { func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() { *x = Bucket_IamConfig_UniformBucketLevelAccess{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[68] + mi := &file_google_storage_v2_storage_proto_msgTypes[72] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6204,7 +6725,7 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string { func (*Bucket_IamConfig_UniformBucketLevelAccess) ProtoMessage() {} func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[68] + mi := &file_google_storage_v2_storage_proto_msgTypes[72] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6217,7 +6738,7 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect. // Deprecated: Use Bucket_IamConfig_UniformBucketLevelAccess.ProtoReflect.Descriptor instead. 
func (*Bucket_IamConfig_UniformBucketLevelAccess) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 3, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 3, 0} } func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetEnabled() bool { @@ -6250,7 +6771,7 @@ type Bucket_Lifecycle_Rule struct { func (x *Bucket_Lifecycle_Rule) Reset() { *x = Bucket_Lifecycle_Rule{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[69] + mi := &file_google_storage_v2_storage_proto_msgTypes[73] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6263,7 +6784,7 @@ func (x *Bucket_Lifecycle_Rule) String() string { func (*Bucket_Lifecycle_Rule) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[69] + mi := &file_google_storage_v2_storage_proto_msgTypes[73] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6276,7 +6797,7 @@ func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle_Rule.ProtoReflect.Descriptor instead. func (*Bucket_Lifecycle_Rule) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0} } func (x *Bucket_Lifecycle_Rule) GetAction() *Bucket_Lifecycle_Rule_Action { @@ -6310,7 +6831,7 @@ type Bucket_Lifecycle_Rule_Action struct { func (x *Bucket_Lifecycle_Rule_Action) Reset() { *x = Bucket_Lifecycle_Rule_Action{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[70] + mi := &file_google_storage_v2_storage_proto_msgTypes[74] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6323,7 +6844,7 @@ func (x *Bucket_Lifecycle_Rule_Action) String() string { func (*Bucket_Lifecycle_Rule_Action) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[70] + mi := &file_google_storage_v2_storage_proto_msgTypes[74] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6336,7 +6857,7 @@ func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle_Rule_Action.ProtoReflect.Descriptor instead. 
func (*Bucket_Lifecycle_Rule_Action) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0, 0} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0, 0} } func (x *Bucket_Lifecycle_Rule_Action) GetType() string { @@ -6408,7 +6929,7 @@ type Bucket_Lifecycle_Rule_Condition struct { func (x *Bucket_Lifecycle_Rule_Condition) Reset() { *x = Bucket_Lifecycle_Rule_Condition{} if protoimpl.UnsafeEnabled { - mi := &file_google_storage_v2_storage_proto_msgTypes[71] + mi := &file_google_storage_v2_storage_proto_msgTypes[75] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -6421,7 +6942,7 @@ func (x *Bucket_Lifecycle_Rule_Condition) String() string { func (*Bucket_Lifecycle_Rule_Condition) ProtoMessage() {} func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message { - mi := &file_google_storage_v2_storage_proto_msgTypes[71] + mi := &file_google_storage_v2_storage_proto_msgTypes[75] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -6434,7 +6955,7 @@ func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message { // Deprecated: Use Bucket_Lifecycle_Rule_Condition.ProtoReflect.Descriptor instead. func (*Bucket_Lifecycle_Rule_Condition) Descriptor() ([]byte, []int) { - return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 4, 0, 1} + return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{43, 4, 0, 1} } func (x *Bucket_Lifecycle_Rule_Condition) GetAgeDays() int32 { @@ -6802,471 +7323,558 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x22, 0x3f, 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, - 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, - 0x64, 0x49, 0x64, 0x22, 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, - 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0xec, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, - 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 
0x6f, 0x66, - 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, - 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, - 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, - 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, - 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, - 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, - 0x61, 0x73, 0x6b, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, - 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, - 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, - 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, - 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, - 0x61, 0x74, 
0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, - 0x73, 0x6b, 0x22, 0xab, 0x05, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, - 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, - 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, - 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, + 0x63, 0x68, 0x22, 0xa9, 0x05, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, + 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, - 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 
0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, - 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, - 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, - 0x61, 0x73, 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, - 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, - 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, - 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, - 0x22, 0xaf, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, - 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, - 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x44, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, - 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0c, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x6d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 
0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x22, 0x8c, 0x04, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, - 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, - 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, - 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, - 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, - 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, + 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, + 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, + 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, - 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, - 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, - 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, - 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a, 0x0b, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, - 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x88, 0x01, - 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 
0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, + 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x2b, + 0x0a, 0x0f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63, + 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x04, 0x52, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x63, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, + 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, + 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, + 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, + 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x63, + 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x6c, 0x22, 0x3f, + 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, + 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, + 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, + 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, + 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0xec, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 
0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73, + 0x65, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, + 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, + 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, - 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, - 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, - 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, - 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, - 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, - 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, - 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, - 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, - 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, - 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, - 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, - 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 
0x69, 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, - 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, - 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x87, 0x01, 0x0a, - 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, - 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, - 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, - 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xd3, 0x03, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, - 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, - 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, - 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, - 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, - 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, - 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, - 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, - 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, - 0x69, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, - 0x75, 0x64, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x69, 0x6d, - 0x69, 0x74, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 
0x20, 0x01, 0x28, 0x08, 0x52, 0x08, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, - 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, - 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, 0x34, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, - 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, - 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x11, - 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x65, 0x6e, - 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x6c, 0x65, - 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x42, 0x0c, - 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, - 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, - 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, - 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, - 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, - 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, - 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, - 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, - 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xb5, 0x0e, 0x0a, 0x14, 0x52, 0x65, 0x77, - 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x31, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x02, - 0xe0, 0x41, 0x05, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 
0x6f, 0x6e, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x57, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x28, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, - 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, - 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, - 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, - 0x65, 0x79, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, - 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, - 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, - 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, - 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, - 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, - 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, - 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, - 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, - 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, - 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, - 0x6f, 0x74, 
0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, - 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, - 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, - 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, - 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, - 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, - 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, - 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, - 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, - 0x52, 0x17, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, - 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x05, 0x52, 0x1a, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, - 0x1b, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, + 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, - 0x4f, 0x0a, 0x22, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, - 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, - 0x12, 0x3e, 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, - 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, - 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 
0x52, 0x18, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, - 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, - 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, - 0x69, 0x74, 0x68, 0x6d, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, - 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, - 0x73, 0x12, 0x53, 0x0a, 0x27, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, - 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x22, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, - 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, - 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, - 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, - 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x75, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, - 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, - 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, - 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 
0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, + 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, + 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, - 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, - 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x22, 0xd6, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, - 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, - 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 
0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, - 0x65, 0x6e, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, + 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xe4, + 0x05, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, + 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, + 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, + 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x15, 0x69, 0x66, 0x4d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, + 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, + 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x18, 0x69, 0x66, + 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, + 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 
0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x05, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, + 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x73, 0x6f, 0x66, 0x74, 0x5f, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, + 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, + 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaf, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x10, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x44, 0x0a, 0x0d, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, + 0x67, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, + 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8c, 0x04, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 
0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, + 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, + 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, + 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, + 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, + 0x69, 0x7a, 0x65, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, + 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, + 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, + 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, + 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 
0x48, 0x00, 0x52, 0x0f, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, + 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, + 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, + 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, + 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, + 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66, 0x69, + 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72, 0x73, + 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, + 0x61, 0x22, 0x87, 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, + 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, + 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xaf, 0x02, 0x0a, 0x1a, 0x53, 0x74, - 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, - 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 
0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, + 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xb5, 0x04, 0x0a, 0x16, + 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, + 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, + 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, + 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, + 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, + 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, + 0x75, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, + 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, + 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 
0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, + 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, + 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, + 0x61, 0x74, 0x61, 0x22, 0x8b, 0x01, 0x0a, 0x17, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, + 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x22, 0x9f, 0x04, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, + 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, + 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, + 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, + 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, + 0x72, 0x12, 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x72, 0x61, + 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, + 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, + 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, + 0x01, 0x12, 0x34, 0x0a, 0x13, 0x6c, 0x65, 
0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, + 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x01, 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, + 0x69, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63, + 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, + 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, + 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42, + 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x64, 0x12, 0x22, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63, + 0x68, 0x47, 0x6c, 0x6f, 0x62, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, + 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, + 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, + 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, + 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, + 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, + 0xb5, 0x0e, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x18, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 
0x12, 0x57, 0x0a, 0x12, 0x64, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0xfa, + 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x1b, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x0d, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, + 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, + 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, + 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, + 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, + 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 
0x18, + 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, + 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, + 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x48, 0x05, 0x52, 0x1a, 0x69, + 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x48, 0x0a, 0x1e, + 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, + 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, + 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a, 0x22, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0e, 0x20, 0x01, + 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, + 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x1c, 0x6d, 0x61, 0x78, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x70, + 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x18, 0x6d, + 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, + 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x10, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, + 0x72, 0x79, 0x70, 
0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, + 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x63, 0x6f, 0x70, 0x79, + 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x27, 0x63, 0x6f, 0x70, 0x79, + 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x63, 0x6f, 0x70, 0x79, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, + 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, - 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, - 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, - 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x87, 0x05, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x36, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, - 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 
0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, - 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, - 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, - 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, - 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, - 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, - 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, - 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, - 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, - 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, - 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, - 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, - 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, - 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, - 0x73, 0x6b, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, - 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x22, 0x69, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, - 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, - 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 
0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x9e, 0x01, 0x0a, - 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, - 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, - 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x81, 0x01, - 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, - 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, - 0x73, 0x22, 0x87, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, + 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, + 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, + 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, + 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, + 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, + 0x66, 0x5f, 
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, + 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, + 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x77, 0x72, + 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, + 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12, + 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, + 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x35, 0x0a, 0x08, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x22, 0xaf, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, + 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x53, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, + 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x53, 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 
0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, + 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, + 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x87, + 0x05, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x33, + 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, + 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, + 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, + 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, + 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, + 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, + 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, + 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, + 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 
0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, + 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, + 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x69, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, + 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, + 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x11, - 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, + 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, + 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x81, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, + 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, + 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x28, + 0x0a, 0x10, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 
0x74, + 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, @@ -7274,893 +7882,952 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{ 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x22, 0x80, 0x02, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, - 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, + 0x63, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, - 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, - 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x68, 0x6f, - 0x77, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x68, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x64, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x7f, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, - 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, - 0x09, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x68, 0x6d, 
0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, - 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, - 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x97, 0x01, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x42, 0x0a, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x68, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, - 0x73, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, - 0x22, 0xbf, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31, - 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, - 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, - 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, - 0x6d, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, - 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x18, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, - 0x65, 0x73, 0x22, 0xca, 0x05, 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xb5, 0x05, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, - 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41, - 0x58, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, - 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x1c, 0x0a, 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57, - 0x52, 0x49, 0x54, 0x45, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, - 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x19, 0x0a, 0x12, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, - 0x45, 0x43, 0x54, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x4d, 0x42, 0x10, 0x80, 0x80, 0xc0, 0x02, - 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, - 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41, - 0x4d, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x08, 0x12, 
0x2a, 0x0a, 0x25, 0x4d, - 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, - 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, - 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x20, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, - 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, - 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, - 0x80, 0x40, 0x12, 0x2a, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, - 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, - 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0xa0, 0x01, 0x12, 0x27, - 0x0a, 0x23, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, - 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c, - 0x49, 0x46, 0x45, 0x43, 0x59, 0x43, 0x4c, 0x45, 0x5f, 0x52, 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50, - 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d, - 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, - 0x53, 0x10, 0x05, 0x12, 0x31, 0x0a, 0x2c, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, - 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, - 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e, - 0x47, 0x54, 0x48, 0x10, 0x80, 0x02, 0x12, 0x33, 0x0a, 0x2e, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, - 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, - 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, - 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d, - 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45, - 0x53, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x40, 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58, - 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, - 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x3f, 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41, - 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, - 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d, - 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45, - 0x52, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, - 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0xe8, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53, - 0x50, 0x4c, 0x49, 0x54, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56, - 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x41, 0x59, 0x53, 0x10, 0x0e, 0x1a, 0x02, 0x10, 0x01, 0x22, - 0xab, 0x1e, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, - 
0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, - 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, - 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, - 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x6c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x70, 0x6f, 0x18, 0x1b, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x72, 0x70, 0x6f, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x08, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, - 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, - 0x6c, 0x12, 0x54, 0x0a, 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, - 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, - 0x79, 0x63, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x52, - 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, - 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x04, - 0x63, 0x6f, 0x72, 0x73, 
0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, - 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, - 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65, - 0x62, 0x73, 0x69, 0x74, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x52, 0x07, - 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, - 0x67, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a, - 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, - 0x67, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, - 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, - 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, - 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45, - 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, - 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 
0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x2e, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, - 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x09, 0x69, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, - 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x19, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a, - 0x73, 0x12, 0x67, 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75, - 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, - 0x73, 0x73, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x1a, 0x30, 0x0a, - 0x07, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, - 0x87, 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, - 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, - 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, - 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, - 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, 0x5c, 0x0a, 
0x0a, 0x45, 0x6e, 0x63, - 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, - 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1, 0x02, 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, - 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, - 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, - 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x18, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, - 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, - 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, - 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, - 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, - 0x65, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, - 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, - 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, - 0x65, 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f, 0x07, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, - 0x12, 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, - 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, - 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, - 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, - 0x05, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, - 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, - 0x52, 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73, 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, - 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, - 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x07, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x06, 0x69, 0x73, 0x4c, 0x69, 0x76, - 0x65, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, - 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, - 0x48, 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65, 0x77, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32, 0x0a, 0x15, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, - 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, - 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x48, 0x03, 0x52, 0x13, 0x64, - 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, - 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, - 0x44, 0x61, 0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, - 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a, 0x1a, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, - 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, - 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, - 0x74, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, - 0x72, 0x65, 0x18, 0x0a, 
0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, - 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, - 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x70, 0x72, 0x65, - 0x66, 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, - 0x65, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, - 0x0b, 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, - 0x5f, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, - 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, - 0x19, 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, - 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, - 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, - 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, 0x67, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, - 0x6c, 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, - 0xbb, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, - 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, - 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, - 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, - 0x6b, 0x65, 0x64, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, - 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x26, 0x0a, - 0x0a, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, - 0x12, 0x28, 0x0a, 0x10, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x75, - 0x66, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, 0x69, 0x6e, - 0x50, 0x61, 0x67, 0x65, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, - 0x74, 0x5f, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x67, 0x65, - 0x1a, 0x3e, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x61, 0x74, - 0x61, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x1a, 0x67, 0x0a, 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, - 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, - 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, - 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x47, 0xea, 0x41, 0x44, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x22, 0x97, 0x02, - 0x0a, 0x13, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, - 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, - 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, - 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, - 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 
0x65, 0x63, 0x74, 0x5f, - 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x57, 0x0a, 0x0f, 0x43, 0x68, 0x65, 0x63, 0x6b, - 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1c, 0x0a, 0x07, 0x63, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x02, 0x08, 0x01, 0x52, - 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, - 0x32, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, - 0x32, 0x63, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, - 0x22, 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, - 0x75, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, - 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48, 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, - 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xfe, 0x02, 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, - 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, - 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, - 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, + 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x80, 0x02, 0x0a, 0x13, 0x4c, 0x69, + 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, - 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x42, 
0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, - 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, + 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, 0x15, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, + 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, + 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x68, 0x6f, 0x77, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x73, 0x68, 0x6f, + 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x7f, 0x0a, 0x14, + 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x09, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x68, 0x6d, 0x61, + 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x97, 0x01, + 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, + 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x07, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xbf, 0x01, 0x0a, 0x19, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, + 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, + 0x79, 0x70, 0x74, 0x69, 0x6f, 
0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x6e, + 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, + 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x18, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, + 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xca, 0x05, 0x0a, 0x10, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x73, 0x22, 0xb5, + 0x05, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x56, 0x41, 0x4c, + 0x55, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x5f, 0x43, 0x48, + 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x1c, + 0x0a, 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x43, 0x48, 0x55, 0x4e, + 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12, 0x19, 0x0a, 0x12, + 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, + 0x4d, 0x42, 0x10, 0x80, 0x80, 0xc0, 0x02, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, + 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, + 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, + 0x80, 0x08, 0x12, 0x2a, 0x0a, 0x25, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, + 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, + 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x20, 0x12, 0x29, + 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, + 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, + 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a, 0x24, 0x4d, 0x41, 0x58, + 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, + 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, + 0x53, 0x10, 0x80, 0xa0, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, + 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, + 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x22, + 0x0a, 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59, 0x43, 0x4c, 0x45, 0x5f, + 0x52, 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, + 0x10, 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, + 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, + 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x53, 0x10, 0x05, 0x12, 0x31, 0x0a, 0x2c, 0x4d, 0x41, + 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, + 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, + 0x4b, 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 
0x48, 0x10, 0x80, 0x02, 0x12, 0x33, 0x0a, + 0x2e, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, + 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, + 0x54, 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, + 0x80, 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, + 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x40, + 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x4b, + 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, + 0x3f, 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x53, 0x5f, + 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, + 0x80, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, + 0x5f, 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x5f, + 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, + 0xe8, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f, 0x54, 0x4f, 0x4b, 0x45, + 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44, 0x41, 0x59, 0x53, + 0x10, 0x0e, 0x1a, 0x02, 0x10, 0x01, 0x22, 0xd0, 0x22, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, + 0x65, 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, + 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, + 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x08, + 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x05, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, + 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x10, 0x0a, 0x03, + 0x72, 0x70, 0x6f, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x70, 
0x6f, 0x12, 0x38, + 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x10, 0x64, 0x65, + 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x41, + 0x0a, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, + 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, + 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, - 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, 0x85, 0x04, 0x0a, 0x12, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, - 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, - 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x68, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, - 0x6d, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, - 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x2a, 
0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, - 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, 0x43, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, + 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, + 0x69, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x6f, 0x72, + 0x73, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, + 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, + 0x6c, 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0f, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x57, 0x65, + 0x62, 0x73, 0x69, 0x74, 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x44, + 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x11, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, + 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x2e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, + 0x67, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 
0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x07, + 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, + 0x52, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x10, 0x72, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x16, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0f, + 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x42, 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x17, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, + 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61, 0x6d, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, + 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x61, 0x74, 0x69, + 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, + 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x1c, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, + 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, + 0x6c, 0x61, 0x73, 0x73, 0x12, 0x5d, 0x0a, 0x12, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x53, 0x6f, 0x66, 0x74, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, + 0x01, 0x52, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x1a, 0x30, 0x0a, 0x07, 0x42, 0x69, 
0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x25, + 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, + 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12, 0x16, + 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, + 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x27, + 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x61, + 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a, + 0x5c, 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, + 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0d, + 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1, 0x02, + 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b, 0x75, + 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c, 0x65, + 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x18, + 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, + 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69, + 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18, + 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 
0x54, 0x69, 0x6d, + 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, + 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, + 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f, 0x07, + 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, + 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, + 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f, 0x6e, + 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73, 0x88, + 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x65, + 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x07, + 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, + 0x06, 0x69, 0x73, 0x4c, 0x69, 0x76, 0x65, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e, 0x75, + 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65, 0x77, + 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32, 0x0a, + 0x15, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61, + 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, + 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, + 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x05, 
0x48, 0x03, 0x52, 0x13, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12, 0x63, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, + 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a, 0x1a, + 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, + 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f, 0x6e, + 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x47, + 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, + 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, + 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, + 0x65, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x25, + 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, + 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, + 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, + 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42, 0x15, + 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, + 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, + 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, + 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, + 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, 0x67, + 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0xbb, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, + 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65, + 0x66, 0x66, 0x65, 0x63, 0x74, 
0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, + 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x1a, 0xd3, 0x01, 0x0a, 0x10, 0x53, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4d, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65, + 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, + 0x00, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x46, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x01, 0x52, 0x0d, 0x65, + 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x42, + 0x15, 0x0a, 0x13, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x65, 0x66, 0x66, 0x65, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, + 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67, 0x65, + 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f, + 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a, 0x15, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, + 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xd6, 0x02, 0x0a, + 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67, 0x67, + 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x16, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, + 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, + 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x88, 0x01, + 0x01, 0x12, 0x70, 0x0a, 0x22, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, 0x01, + 0x52, 0x1e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x88, 0x01, 0x01, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, + 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x42, 0x25, + 0x0a, 0x23, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x3a, 0x7d, 0xea, 0x41, 0x7a, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x4d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x22, - 0x71, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, - 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x5f, - 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 
0x79, 0x74, - 0x65, 0x73, 0x22, 0xec, 0x0b, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, - 0x41, 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, - 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, - 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, - 0x12, 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, - 0x64, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, - 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, - 0x6c, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, - 0x03, 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, - 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, - 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 
0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, - 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, - 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, - 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, - 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, + 0x3a, 0x47, 0xea, 0x41, 0x44, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, + 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, + 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, + 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, + 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, + 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, + 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, + 0x65, 0x61, 0x6d, 0x22, 0x5a, 0x0a, 
0x0f, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, + 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x05, 0xe0, 0x41, 0x01, 0x08, 0x01, 0x52, 0x07, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, + 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, + 0x63, 0x88, 0x01, 0x01, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, + 0x54, 0x0a, 0x0f, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, + 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x12, + 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48, 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, + 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xfe, 0x02, 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, + 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, + 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, + 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, + 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, + 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22, 0x85, 0x04, 0x0a, 0x12, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, + 0x63, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x68, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, + 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, + 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x12, 0x2c, 0x0a, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, + 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a, + 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, 0x61, 0x79, + 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, + 0x7d, 0xea, 0x41, 0x7a, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, + 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x4d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, + 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x22, 0x71, + 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, + 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, 
0x5f, 0x73, + 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, + 0x73, 0x22, 0xec, 0x0b, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, + 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, + 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, + 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, + 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, + 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, + 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, + 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, + 0x03, 0xe0, 
0x41, 0x03, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, + 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, + 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, + 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, + 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, + 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07, + 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, + 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, + 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, 0x0a, + 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, + 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, - 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, - 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, - 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, - 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, - 0x0a, 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, - 0x41, 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 
0x61, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, - 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, - 0x64, 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, - 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, - 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, - 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, - 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, - 0x48, 0x00, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, - 0x6c, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, - 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, - 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, - 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, - 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x1a, - 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, 0x11, - 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, - 0x64, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 
0x6c, 0x65, 0x12, 0x0e, 0x0a, - 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, - 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, - 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, - 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, - 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, - 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, - 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, 0x0a, 0x13, - 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x66, - 0x69, 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, 0x66, - 0x69, 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, - 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, - 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, 0x0a, 0x0b, - 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, 0x0e, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d, 0x61, 0x69, - 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x3c, 0x0a, - 0x05, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, - 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, 
0x43, - 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, - 0x65, 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, - 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, - 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0x98, 0x26, 0x0a, - 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, + 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d, + 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64, + 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, + 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, + 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48, + 0x00, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, + 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, + 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, + 0x75, 0x73, 0x74, 
0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, + 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x3b, + 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, + 0x22, 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x61, + 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x65, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, + 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, + 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, 0x0a, 0x13, 0x4c, + 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, + 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, + 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 
0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, 0x0a, 0x0b, 0x50, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d, 0x61, 0x69, 0x6c, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x3c, 0x0a, 0x05, + 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, + 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, + 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, + 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0x98, 0x28, 0x0a, 0x07, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, + 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x6f, 0x0a, 0x09, 0x47, + 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x6f, 0x0a, 0x09, - 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, - 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x1a, 0x19, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xab, 0x01, - 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x22, 0x58, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, - 0x93, 0x02, 0x38, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x1e, 0x0a, 0x0e, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x85, 0x01, 0x0a, 0x0b, - 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, - 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0xda, 0x41, 0x06, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, 0x06, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, - 0x2a, 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x22, 0x26, 0xda, 0x41, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x8a, 0xd3, 0xe4, 0x93, - 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xab, 0x01, 0x0a, 0x0c, 0x47, 0x65, - 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, - 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 
0x15, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x60, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, - 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0xb2, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, - 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, - 0x69, 0x63, 0x79, 0x22, 0x67, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, - 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, - 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0xd7, 0x01, 0x0a, - 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, - 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, - 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xab, 0x01, 0x0a, + 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x22, 0x58, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, + 0x02, 0x38, 0x12, 
0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x1e, 0x0a, 0x0e, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x85, 0x01, 0x0a, 0x0b, 0x4c, + 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0xda, 0x41, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, + 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, + 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x22, 0x26, 0xda, 0x41, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xab, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, + 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x22, 0x60, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 
0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0xda, 0x41, 0x12, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, - 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, - 0x2a, 0x2a, 0x7d, 0x12, 0x9f, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, - 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x37, 0xda, 0x41, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, - 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xa8, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0xb2, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, + 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x22, 0x67, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, + 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0xd7, 0x01, 0x0a, 0x12, + 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, + 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, + 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 
0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, + 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0xda, 0x41, 0x12, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, + 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, + 0x2a, 0x7d, 0x12, 0x9f, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x37, 0xda, 0x41, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, + 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xa8, 0x01, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x37, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, + 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, + 
0xb1, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x37, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, - 0x12, 0xb1, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, - 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, 0xda, 0x41, 0x1a, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, - 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xa8, 0x01, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, - 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, - 0x7e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x6e, 0x43, 0x6f, 0x6e, 0x66, 
0x69, 0x67, 0x22, 0x3a, 0xda, 0x41, 0x1a, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, + 0x2a, 0x2a, 0x7d, 0x12, 0xa8, 0x01, 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, + 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, + 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x7e, + 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x0a, 0x12, 0x64, + 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98, + 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, + 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, + 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x8d, 0x01, 0x0a, 0x0d, 0x52, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, + 0x38, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, + 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xba, 0x01, 0x0a, 0x14, 0x43, 0x61, + 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, + 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, + 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, + 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, + 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, + 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x0a, 0x12, - 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, - 0x98, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, - 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xba, 0x01, 0x0a, 0x14, 0x43, - 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 
0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, - 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, - 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, - 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, - 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, - 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, - 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, - 0xa5, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x24, + 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xa5, + 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, + 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x3d, 0x2a, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x4f, 
0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x39, 0xda, 0x41, 0x12, 0x6f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, + 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, + 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x6e, 0x0a, 0x0f, 0x42, 0x69, 0x64, 0x69, 0x57, + 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, + 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x48, 0xda, 0x41, 0x0d, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, - 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, - 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 
0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x39, 0xda, 0x41, 0x12, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, - 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x84, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, - 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, - 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, - 0x98, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, - 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, - 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x13, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, - 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, - 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 
0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98, + 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x8a, + 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, + 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x13, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, - 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, 0x69, - 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, - 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x10, + 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, + 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, + 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x10, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 
0x73, - 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, - 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, - 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, - 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, - 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, - 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x80, 0x01, 0x0a, - 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, - 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, - 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x95, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, - 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, - 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, - 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x77, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, 0xda, 0x41, 0x11, 0x61, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, - 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x12, 0x7d, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, + 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, + 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, + 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x80, 0x01, 0x0a, 0x11, + 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, + 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, + 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x95, + 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, + 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, + 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x31, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2c, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, + 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x77, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x7c, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, - 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 
0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, - 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x9d, 0x01, - 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, - 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, - 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x3f, 0xda, 0x41, - 0x14, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, 0x20, 0x0a, 0x10, 0x68, - 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, - 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x1a, 0xa7, 0x02, - 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, 0x02, 0x68, 0x74, 0x74, - 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, + 0x7d, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x24, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0xda, 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, + 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7c, + 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, + 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, + 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 
0x12, 0x9d, 0x01, 0x0a, + 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, + 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x3f, 0xda, 0x41, 0x14, + 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, + 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12, 0x20, 0x0a, 0x10, 0x68, 0x6d, + 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, + 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x1a, 0xa7, 0x02, 0xca, + 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, - 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, - 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68, 0x74, 0x74, - 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, - 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, - 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, - 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, - 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xe2, 0x01, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, - 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, - 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, - 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, - 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, - 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, - 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, - 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0c, 0x53, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x63, 0x6c, - 
0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, - 0x6f, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x70, 0x62, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, + 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, + 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, + 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, + 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x66, + 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68, 0x74, 0x74, 0x70, + 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, + 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, + 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, + 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xe2, 0x01, 0xea, 0x41, 0x78, 0x0a, 0x21, 0x63, 0x6c, + 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x12, + 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52, 0x69, 0x6e, 0x67, + 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f, 0x63, 0x72, 0x79, + 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x5f, + 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0c, 0x53, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, + 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, + 0x62, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -8176,7 +8843,7 @@ func file_google_storage_v2_storage_proto_rawDescGZIP() []byte { } var file_google_storage_v2_storage_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 74) +var 
file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 78) var file_google_storage_v2_storage_proto_goTypes = []interface{}{ (ServiceConstants_Values)(0), // 0: google.storage.v2.ServiceConstants.Values (*DeleteBucketRequest)(nil), // 1: google.storage.v2.DeleteBucketRequest @@ -8193,233 +8860,251 @@ var file_google_storage_v2_storage_proto_goTypes = []interface{}{ (*ListNotificationConfigsResponse)(nil), // 12: google.storage.v2.ListNotificationConfigsResponse (*ComposeObjectRequest)(nil), // 13: google.storage.v2.ComposeObjectRequest (*DeleteObjectRequest)(nil), // 14: google.storage.v2.DeleteObjectRequest - (*CancelResumableWriteRequest)(nil), // 15: google.storage.v2.CancelResumableWriteRequest - (*CancelResumableWriteResponse)(nil), // 16: google.storage.v2.CancelResumableWriteResponse - (*ReadObjectRequest)(nil), // 17: google.storage.v2.ReadObjectRequest - (*GetObjectRequest)(nil), // 18: google.storage.v2.GetObjectRequest - (*ReadObjectResponse)(nil), // 19: google.storage.v2.ReadObjectResponse - (*WriteObjectSpec)(nil), // 20: google.storage.v2.WriteObjectSpec - (*WriteObjectRequest)(nil), // 21: google.storage.v2.WriteObjectRequest - (*WriteObjectResponse)(nil), // 22: google.storage.v2.WriteObjectResponse - (*ListObjectsRequest)(nil), // 23: google.storage.v2.ListObjectsRequest - (*QueryWriteStatusRequest)(nil), // 24: google.storage.v2.QueryWriteStatusRequest - (*QueryWriteStatusResponse)(nil), // 25: google.storage.v2.QueryWriteStatusResponse - (*RewriteObjectRequest)(nil), // 26: google.storage.v2.RewriteObjectRequest - (*RewriteResponse)(nil), // 27: google.storage.v2.RewriteResponse - (*StartResumableWriteRequest)(nil), // 28: google.storage.v2.StartResumableWriteRequest - (*StartResumableWriteResponse)(nil), // 29: google.storage.v2.StartResumableWriteResponse - (*UpdateObjectRequest)(nil), // 30: google.storage.v2.UpdateObjectRequest - (*GetServiceAccountRequest)(nil), // 31: google.storage.v2.GetServiceAccountRequest - (*CreateHmacKeyRequest)(nil), // 32: google.storage.v2.CreateHmacKeyRequest - (*CreateHmacKeyResponse)(nil), // 33: google.storage.v2.CreateHmacKeyResponse - (*DeleteHmacKeyRequest)(nil), // 34: google.storage.v2.DeleteHmacKeyRequest - (*GetHmacKeyRequest)(nil), // 35: google.storage.v2.GetHmacKeyRequest - (*ListHmacKeysRequest)(nil), // 36: google.storage.v2.ListHmacKeysRequest - (*ListHmacKeysResponse)(nil), // 37: google.storage.v2.ListHmacKeysResponse - (*UpdateHmacKeyRequest)(nil), // 38: google.storage.v2.UpdateHmacKeyRequest - (*CommonObjectRequestParams)(nil), // 39: google.storage.v2.CommonObjectRequestParams - (*ServiceConstants)(nil), // 40: google.storage.v2.ServiceConstants - (*Bucket)(nil), // 41: google.storage.v2.Bucket - (*BucketAccessControl)(nil), // 42: google.storage.v2.BucketAccessControl - (*ChecksummedData)(nil), // 43: google.storage.v2.ChecksummedData - (*ObjectChecksums)(nil), // 44: google.storage.v2.ObjectChecksums - (*HmacKeyMetadata)(nil), // 45: google.storage.v2.HmacKeyMetadata - (*NotificationConfig)(nil), // 46: google.storage.v2.NotificationConfig - (*CustomerEncryption)(nil), // 47: google.storage.v2.CustomerEncryption - (*Object)(nil), // 48: google.storage.v2.Object - (*ObjectAccessControl)(nil), // 49: google.storage.v2.ObjectAccessControl - (*ListObjectsResponse)(nil), // 50: google.storage.v2.ListObjectsResponse - (*ProjectTeam)(nil), // 51: google.storage.v2.ProjectTeam - (*ServiceAccount)(nil), // 52: google.storage.v2.ServiceAccount - (*Owner)(nil), // 53: google.storage.v2.Owner - 
(*ContentRange)(nil), // 54: google.storage.v2.ContentRange - (*ComposeObjectRequest_SourceObject)(nil), // 55: google.storage.v2.ComposeObjectRequest.SourceObject - (*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 56: google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions - (*Bucket_Billing)(nil), // 57: google.storage.v2.Bucket.Billing - (*Bucket_Cors)(nil), // 58: google.storage.v2.Bucket.Cors - (*Bucket_Encryption)(nil), // 59: google.storage.v2.Bucket.Encryption - (*Bucket_IamConfig)(nil), // 60: google.storage.v2.Bucket.IamConfig - (*Bucket_Lifecycle)(nil), // 61: google.storage.v2.Bucket.Lifecycle - (*Bucket_Logging)(nil), // 62: google.storage.v2.Bucket.Logging - (*Bucket_RetentionPolicy)(nil), // 63: google.storage.v2.Bucket.RetentionPolicy - (*Bucket_Versioning)(nil), // 64: google.storage.v2.Bucket.Versioning - (*Bucket_Website)(nil), // 65: google.storage.v2.Bucket.Website - (*Bucket_CustomPlacementConfig)(nil), // 66: google.storage.v2.Bucket.CustomPlacementConfig - (*Bucket_Autoclass)(nil), // 67: google.storage.v2.Bucket.Autoclass - nil, // 68: google.storage.v2.Bucket.LabelsEntry - (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 69: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess - (*Bucket_Lifecycle_Rule)(nil), // 70: google.storage.v2.Bucket.Lifecycle.Rule - (*Bucket_Lifecycle_Rule_Action)(nil), // 71: google.storage.v2.Bucket.Lifecycle.Rule.Action - (*Bucket_Lifecycle_Rule_Condition)(nil), // 72: google.storage.v2.Bucket.Lifecycle.Rule.Condition - nil, // 73: google.storage.v2.NotificationConfig.CustomAttributesEntry - nil, // 74: google.storage.v2.Object.MetadataEntry - (*fieldmaskpb.FieldMask)(nil), // 75: google.protobuf.FieldMask - (*timestamppb.Timestamp)(nil), // 76: google.protobuf.Timestamp - (*durationpb.Duration)(nil), // 77: google.protobuf.Duration - (*date.Date)(nil), // 78: google.type.Date - (*iampb.GetIamPolicyRequest)(nil), // 79: google.iam.v1.GetIamPolicyRequest - (*iampb.SetIamPolicyRequest)(nil), // 80: google.iam.v1.SetIamPolicyRequest - (*iampb.TestIamPermissionsRequest)(nil), // 81: google.iam.v1.TestIamPermissionsRequest - (*emptypb.Empty)(nil), // 82: google.protobuf.Empty - (*iampb.Policy)(nil), // 83: google.iam.v1.Policy - (*iampb.TestIamPermissionsResponse)(nil), // 84: google.iam.v1.TestIamPermissionsResponse + (*RestoreObjectRequest)(nil), // 15: google.storage.v2.RestoreObjectRequest + (*CancelResumableWriteRequest)(nil), // 16: google.storage.v2.CancelResumableWriteRequest + (*CancelResumableWriteResponse)(nil), // 17: google.storage.v2.CancelResumableWriteResponse + (*ReadObjectRequest)(nil), // 18: google.storage.v2.ReadObjectRequest + (*GetObjectRequest)(nil), // 19: google.storage.v2.GetObjectRequest + (*ReadObjectResponse)(nil), // 20: google.storage.v2.ReadObjectResponse + (*WriteObjectSpec)(nil), // 21: google.storage.v2.WriteObjectSpec + (*WriteObjectRequest)(nil), // 22: google.storage.v2.WriteObjectRequest + (*WriteObjectResponse)(nil), // 23: google.storage.v2.WriteObjectResponse + (*BidiWriteObjectRequest)(nil), // 24: google.storage.v2.BidiWriteObjectRequest + (*BidiWriteObjectResponse)(nil), // 25: google.storage.v2.BidiWriteObjectResponse + (*ListObjectsRequest)(nil), // 26: google.storage.v2.ListObjectsRequest + (*QueryWriteStatusRequest)(nil), // 27: google.storage.v2.QueryWriteStatusRequest + (*QueryWriteStatusResponse)(nil), // 28: google.storage.v2.QueryWriteStatusResponse + (*RewriteObjectRequest)(nil), // 29: google.storage.v2.RewriteObjectRequest + 
(*RewriteResponse)(nil), // 30: google.storage.v2.RewriteResponse + (*StartResumableWriteRequest)(nil), // 31: google.storage.v2.StartResumableWriteRequest + (*StartResumableWriteResponse)(nil), // 32: google.storage.v2.StartResumableWriteResponse + (*UpdateObjectRequest)(nil), // 33: google.storage.v2.UpdateObjectRequest + (*GetServiceAccountRequest)(nil), // 34: google.storage.v2.GetServiceAccountRequest + (*CreateHmacKeyRequest)(nil), // 35: google.storage.v2.CreateHmacKeyRequest + (*CreateHmacKeyResponse)(nil), // 36: google.storage.v2.CreateHmacKeyResponse + (*DeleteHmacKeyRequest)(nil), // 37: google.storage.v2.DeleteHmacKeyRequest + (*GetHmacKeyRequest)(nil), // 38: google.storage.v2.GetHmacKeyRequest + (*ListHmacKeysRequest)(nil), // 39: google.storage.v2.ListHmacKeysRequest + (*ListHmacKeysResponse)(nil), // 40: google.storage.v2.ListHmacKeysResponse + (*UpdateHmacKeyRequest)(nil), // 41: google.storage.v2.UpdateHmacKeyRequest + (*CommonObjectRequestParams)(nil), // 42: google.storage.v2.CommonObjectRequestParams + (*ServiceConstants)(nil), // 43: google.storage.v2.ServiceConstants + (*Bucket)(nil), // 44: google.storage.v2.Bucket + (*BucketAccessControl)(nil), // 45: google.storage.v2.BucketAccessControl + (*ChecksummedData)(nil), // 46: google.storage.v2.ChecksummedData + (*ObjectChecksums)(nil), // 47: google.storage.v2.ObjectChecksums + (*HmacKeyMetadata)(nil), // 48: google.storage.v2.HmacKeyMetadata + (*NotificationConfig)(nil), // 49: google.storage.v2.NotificationConfig + (*CustomerEncryption)(nil), // 50: google.storage.v2.CustomerEncryption + (*Object)(nil), // 51: google.storage.v2.Object + (*ObjectAccessControl)(nil), // 52: google.storage.v2.ObjectAccessControl + (*ListObjectsResponse)(nil), // 53: google.storage.v2.ListObjectsResponse + (*ProjectTeam)(nil), // 54: google.storage.v2.ProjectTeam + (*ServiceAccount)(nil), // 55: google.storage.v2.ServiceAccount + (*Owner)(nil), // 56: google.storage.v2.Owner + (*ContentRange)(nil), // 57: google.storage.v2.ContentRange + (*ComposeObjectRequest_SourceObject)(nil), // 58: google.storage.v2.ComposeObjectRequest.SourceObject + (*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 59: google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + (*Bucket_Billing)(nil), // 60: google.storage.v2.Bucket.Billing + (*Bucket_Cors)(nil), // 61: google.storage.v2.Bucket.Cors + (*Bucket_Encryption)(nil), // 62: google.storage.v2.Bucket.Encryption + (*Bucket_IamConfig)(nil), // 63: google.storage.v2.Bucket.IamConfig + (*Bucket_Lifecycle)(nil), // 64: google.storage.v2.Bucket.Lifecycle + (*Bucket_Logging)(nil), // 65: google.storage.v2.Bucket.Logging + (*Bucket_RetentionPolicy)(nil), // 66: google.storage.v2.Bucket.RetentionPolicy + (*Bucket_SoftDeletePolicy)(nil), // 67: google.storage.v2.Bucket.SoftDeletePolicy + (*Bucket_Versioning)(nil), // 68: google.storage.v2.Bucket.Versioning + (*Bucket_Website)(nil), // 69: google.storage.v2.Bucket.Website + (*Bucket_CustomPlacementConfig)(nil), // 70: google.storage.v2.Bucket.CustomPlacementConfig + (*Bucket_Autoclass)(nil), // 71: google.storage.v2.Bucket.Autoclass + nil, // 72: google.storage.v2.Bucket.LabelsEntry + (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 73: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + (*Bucket_Lifecycle_Rule)(nil), // 74: google.storage.v2.Bucket.Lifecycle.Rule + (*Bucket_Lifecycle_Rule_Action)(nil), // 75: google.storage.v2.Bucket.Lifecycle.Rule.Action + (*Bucket_Lifecycle_Rule_Condition)(nil), // 76: 
google.storage.v2.Bucket.Lifecycle.Rule.Condition + nil, // 77: google.storage.v2.NotificationConfig.CustomAttributesEntry + nil, // 78: google.storage.v2.Object.MetadataEntry + (*fieldmaskpb.FieldMask)(nil), // 79: google.protobuf.FieldMask + (*timestamppb.Timestamp)(nil), // 80: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 81: google.protobuf.Duration + (*date.Date)(nil), // 82: google.type.Date + (*iampb.GetIamPolicyRequest)(nil), // 83: google.iam.v1.GetIamPolicyRequest + (*iampb.SetIamPolicyRequest)(nil), // 84: google.iam.v1.SetIamPolicyRequest + (*iampb.TestIamPermissionsRequest)(nil), // 85: google.iam.v1.TestIamPermissionsRequest + (*emptypb.Empty)(nil), // 86: google.protobuf.Empty + (*iampb.Policy)(nil), // 87: google.iam.v1.Policy + (*iampb.TestIamPermissionsResponse)(nil), // 88: google.iam.v1.TestIamPermissionsResponse } var file_google_storage_v2_storage_proto_depIdxs = []int32{ - 75, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask - 41, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket - 75, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask - 41, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket - 41, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket - 75, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask - 46, // 6: google.storage.v2.CreateNotificationConfigRequest.notification_config:type_name -> google.storage.v2.NotificationConfig - 46, // 7: google.storage.v2.ListNotificationConfigsResponse.notification_configs:type_name -> google.storage.v2.NotificationConfig - 48, // 8: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object - 55, // 9: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject - 39, // 10: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 44, // 11: google.storage.v2.ComposeObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 39, // 12: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 39, // 13: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 75, // 14: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask - 39, // 15: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 75, // 16: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask - 43, // 17: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData - 44, // 18: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 54, // 19: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange - 48, // 20: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object - 48, // 21: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object - 20, // 22: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec - 43, // 23: 
google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData - 44, // 24: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 39, // 25: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 48, // 26: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object - 75, // 27: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask - 39, // 28: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 48, // 29: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object - 48, // 30: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object - 39, // 31: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 44, // 32: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 48, // 33: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object - 20, // 34: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec - 39, // 35: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 44, // 36: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums - 48, // 37: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object - 75, // 38: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask - 39, // 39: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams - 45, // 40: google.storage.v2.CreateHmacKeyResponse.metadata:type_name -> google.storage.v2.HmacKeyMetadata - 45, // 41: google.storage.v2.ListHmacKeysResponse.hmac_keys:type_name -> google.storage.v2.HmacKeyMetadata - 45, // 42: google.storage.v2.UpdateHmacKeyRequest.hmac_key:type_name -> google.storage.v2.HmacKeyMetadata - 75, // 43: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask - 42, // 44: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl - 49, // 45: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl - 61, // 46: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle - 76, // 47: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp - 58, // 48: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors - 76, // 49: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp - 68, // 50: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry - 65, // 51: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website - 64, // 52: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning - 62, // 53: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging - 53, // 54: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner - 59, // 55: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption - 57, // 56: 
google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing - 63, // 57: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy - 60, // 58: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig - 66, // 59: google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig - 67, // 60: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass - 51, // 61: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam - 76, // 62: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp - 76, // 63: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp - 73, // 64: google.storage.v2.NotificationConfig.custom_attributes:type_name -> google.storage.v2.NotificationConfig.CustomAttributesEntry - 49, // 65: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl - 76, // 66: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp - 76, // 67: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp - 44, // 68: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums - 76, // 69: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp - 76, // 70: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp - 76, // 71: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp - 74, // 72: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry - 53, // 73: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner - 47, // 74: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption - 76, // 75: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp - 51, // 76: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam - 48, // 77: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object - 56, // 78: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions - 69, // 79: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess - 70, // 80: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule - 76, // 81: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp - 77, // 82: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration - 76, // 83: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp - 76, // 84: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp - 71, // 85: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action - 72, // 86: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition - 78, // 87: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date - 78, // 88: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date - 78, // 89: 
google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date - 1, // 90: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest - 2, // 91: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest - 3, // 92: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest - 4, // 93: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest - 6, // 94: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest - 79, // 95: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest - 80, // 96: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest - 81, // 97: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest - 7, // 98: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest - 8, // 99: google.storage.v2.Storage.DeleteNotificationConfig:input_type -> google.storage.v2.DeleteNotificationConfigRequest - 9, // 100: google.storage.v2.Storage.GetNotificationConfig:input_type -> google.storage.v2.GetNotificationConfigRequest - 10, // 101: google.storage.v2.Storage.CreateNotificationConfig:input_type -> google.storage.v2.CreateNotificationConfigRequest - 11, // 102: google.storage.v2.Storage.ListNotificationConfigs:input_type -> google.storage.v2.ListNotificationConfigsRequest - 13, // 103: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest - 14, // 104: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest - 15, // 105: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest - 18, // 106: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest - 17, // 107: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest - 30, // 108: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest - 21, // 109: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest - 23, // 110: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest - 26, // 111: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest - 28, // 112: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest - 24, // 113: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest - 31, // 114: google.storage.v2.Storage.GetServiceAccount:input_type -> google.storage.v2.GetServiceAccountRequest - 32, // 115: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest - 34, // 116: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest - 35, // 117: google.storage.v2.Storage.GetHmacKey:input_type -> google.storage.v2.GetHmacKeyRequest - 36, // 118: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest - 38, // 119: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest - 82, // 120: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty - 41, // 121: google.storage.v2.Storage.GetBucket:output_type -> 
google.storage.v2.Bucket - 41, // 122: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket - 5, // 123: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse - 41, // 124: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket - 83, // 125: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy - 83, // 126: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy - 84, // 127: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse - 41, // 128: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket - 82, // 129: google.storage.v2.Storage.DeleteNotificationConfig:output_type -> google.protobuf.Empty - 46, // 130: google.storage.v2.Storage.GetNotificationConfig:output_type -> google.storage.v2.NotificationConfig - 46, // 131: google.storage.v2.Storage.CreateNotificationConfig:output_type -> google.storage.v2.NotificationConfig - 12, // 132: google.storage.v2.Storage.ListNotificationConfigs:output_type -> google.storage.v2.ListNotificationConfigsResponse - 48, // 133: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object - 82, // 134: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty - 16, // 135: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse - 48, // 136: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object - 19, // 137: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse - 48, // 138: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object - 22, // 139: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse - 50, // 140: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse - 27, // 141: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse - 29, // 142: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse - 25, // 143: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse - 52, // 144: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount - 33, // 145: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse - 82, // 146: google.storage.v2.Storage.DeleteHmacKey:output_type -> google.protobuf.Empty - 45, // 147: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata - 37, // 148: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse - 45, // 149: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata - 120, // [120:150] is the sub-list for method output_type - 90, // [90:120] is the sub-list for method input_type - 90, // [90:90] is the sub-list for extension type_name - 90, // [90:90] is the sub-list for extension extendee - 0, // [0:90] is the sub-list for field type_name + 79, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask + 44, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket + 79, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask + 44, // 3: 
google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket + 44, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket + 79, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask + 49, // 6: google.storage.v2.CreateNotificationConfigRequest.notification_config:type_name -> google.storage.v2.NotificationConfig + 49, // 7: google.storage.v2.ListNotificationConfigsResponse.notification_configs:type_name -> google.storage.v2.NotificationConfig + 51, // 8: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object + 58, // 9: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject + 42, // 10: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 47, // 11: google.storage.v2.ComposeObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 42, // 12: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 42, // 13: google.storage.v2.RestoreObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 42, // 14: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 79, // 15: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 42, // 16: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 79, // 17: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask + 46, // 18: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 47, // 19: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 57, // 20: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange + 51, // 21: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object + 51, // 22: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object + 21, // 23: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 46, // 24: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 47, // 25: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 42, // 26: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 51, // 27: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object + 21, // 28: google.storage.v2.BidiWriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 46, // 29: google.storage.v2.BidiWriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData + 47, // 30: google.storage.v2.BidiWriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 42, // 31: google.storage.v2.BidiWriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 51, // 32: google.storage.v2.BidiWriteObjectResponse.resource:type_name -> google.storage.v2.Object + 79, // 33: google.storage.v2.ListObjectsRequest.read_mask:type_name -> 
google.protobuf.FieldMask + 42, // 34: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 51, // 35: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object + 51, // 36: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object + 42, // 37: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 47, // 38: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 51, // 39: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object + 21, // 40: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec + 42, // 41: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 47, // 42: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums + 51, // 43: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object + 79, // 44: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask + 42, // 45: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams + 48, // 46: google.storage.v2.CreateHmacKeyResponse.metadata:type_name -> google.storage.v2.HmacKeyMetadata + 48, // 47: google.storage.v2.ListHmacKeysResponse.hmac_keys:type_name -> google.storage.v2.HmacKeyMetadata + 48, // 48: google.storage.v2.UpdateHmacKeyRequest.hmac_key:type_name -> google.storage.v2.HmacKeyMetadata + 79, // 49: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask + 45, // 50: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl + 52, // 51: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl + 64, // 52: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle + 80, // 53: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp + 61, // 54: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors + 80, // 55: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp + 72, // 56: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry + 69, // 57: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website + 68, // 58: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning + 65, // 59: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging + 56, // 60: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner + 62, // 61: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption + 60, // 62: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing + 66, // 63: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy + 63, // 64: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig + 70, // 65: google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig + 71, // 66: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass + 67, // 67: 
google.storage.v2.Bucket.soft_delete_policy:type_name -> google.storage.v2.Bucket.SoftDeletePolicy + 54, // 68: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 80, // 69: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp + 80, // 70: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp + 77, // 71: google.storage.v2.NotificationConfig.custom_attributes:type_name -> google.storage.v2.NotificationConfig.CustomAttributesEntry + 52, // 72: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl + 80, // 73: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp + 80, // 74: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp + 47, // 75: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums + 80, // 76: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp + 80, // 77: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp + 80, // 78: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp + 78, // 79: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry + 56, // 80: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner + 50, // 81: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption + 80, // 82: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp + 54, // 83: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam + 51, // 84: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object + 59, // 85: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions + 73, // 86: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess + 74, // 87: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule + 80, // 88: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp + 81, // 89: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration + 81, // 90: google.storage.v2.Bucket.SoftDeletePolicy.retention_duration:type_name -> google.protobuf.Duration + 80, // 91: google.storage.v2.Bucket.SoftDeletePolicy.effective_time:type_name -> google.protobuf.Timestamp + 80, // 92: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp + 80, // 93: google.storage.v2.Bucket.Autoclass.terminal_storage_class_update_time:type_name -> google.protobuf.Timestamp + 80, // 94: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp + 75, // 95: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action + 76, // 96: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition + 82, // 97: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date + 82, // 98: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date + 82, // 99: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> 
google.type.Date + 1, // 100: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest + 2, // 101: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest + 3, // 102: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest + 4, // 103: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest + 6, // 104: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest + 83, // 105: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest + 84, // 106: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest + 85, // 107: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest + 7, // 108: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest + 8, // 109: google.storage.v2.Storage.DeleteNotificationConfig:input_type -> google.storage.v2.DeleteNotificationConfigRequest + 9, // 110: google.storage.v2.Storage.GetNotificationConfig:input_type -> google.storage.v2.GetNotificationConfigRequest + 10, // 111: google.storage.v2.Storage.CreateNotificationConfig:input_type -> google.storage.v2.CreateNotificationConfigRequest + 11, // 112: google.storage.v2.Storage.ListNotificationConfigs:input_type -> google.storage.v2.ListNotificationConfigsRequest + 13, // 113: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest + 14, // 114: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest + 15, // 115: google.storage.v2.Storage.RestoreObject:input_type -> google.storage.v2.RestoreObjectRequest + 16, // 116: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest + 19, // 117: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest + 18, // 118: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest + 33, // 119: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest + 22, // 120: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest + 24, // 121: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest + 26, // 122: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest + 29, // 123: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest + 31, // 124: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest + 27, // 125: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest + 34, // 126: google.storage.v2.Storage.GetServiceAccount:input_type -> google.storage.v2.GetServiceAccountRequest + 35, // 127: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest + 37, // 128: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest + 38, // 129: google.storage.v2.Storage.GetHmacKey:input_type -> google.storage.v2.GetHmacKeyRequest + 39, // 130: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest + 41, // 131: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest + 86, // 132: 
google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty + 44, // 133: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket + 44, // 134: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket + 5, // 135: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse + 44, // 136: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket + 87, // 137: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy + 87, // 138: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy + 88, // 139: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse + 44, // 140: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket + 86, // 141: google.storage.v2.Storage.DeleteNotificationConfig:output_type -> google.protobuf.Empty + 49, // 142: google.storage.v2.Storage.GetNotificationConfig:output_type -> google.storage.v2.NotificationConfig + 49, // 143: google.storage.v2.Storage.CreateNotificationConfig:output_type -> google.storage.v2.NotificationConfig + 12, // 144: google.storage.v2.Storage.ListNotificationConfigs:output_type -> google.storage.v2.ListNotificationConfigsResponse + 51, // 145: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object + 86, // 146: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty + 51, // 147: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object + 17, // 148: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse + 51, // 149: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object + 20, // 150: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse + 51, // 151: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object + 23, // 152: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse + 25, // 153: google.storage.v2.Storage.BidiWriteObject:output_type -> google.storage.v2.BidiWriteObjectResponse + 53, // 154: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse + 30, // 155: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse + 32, // 156: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse + 28, // 157: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse + 55, // 158: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount + 36, // 159: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse + 86, // 160: google.storage.v2.Storage.DeleteHmacKey:output_type -> google.protobuf.Empty + 48, // 161: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata + 40, // 162: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse + 48, // 163: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata + 132, // [132:164] is the sub-list for method output_type + 100, // [100:132] is the sub-list for method input_type + 100, // [100:100] is the sub-list for extension type_name + 100, // [100:100] is the sub-list for extension extendee + 0, // [0:100] is the sub-list for 
field type_name } func init() { file_google_storage_v2_storage_proto_init() } @@ -8597,7 +9282,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CancelResumableWriteRequest); i { + switch v := v.(*RestoreObjectRequest); i { case 0: return &v.state case 1: @@ -8609,7 +9294,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CancelResumableWriteResponse); i { + switch v := v.(*CancelResumableWriteRequest); i { case 0: return &v.state case 1: @@ -8621,7 +9306,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadObjectRequest); i { + switch v := v.(*CancelResumableWriteResponse); i { case 0: return &v.state case 1: @@ -8633,7 +9318,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetObjectRequest); i { + switch v := v.(*ReadObjectRequest); i { case 0: return &v.state case 1: @@ -8645,7 +9330,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadObjectResponse); i { + switch v := v.(*GetObjectRequest); i { case 0: return &v.state case 1: @@ -8657,7 +9342,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WriteObjectSpec); i { + switch v := v.(*ReadObjectResponse); i { case 0: return &v.state case 1: @@ -8669,7 +9354,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WriteObjectRequest); i { + switch v := v.(*WriteObjectSpec); i { case 0: return &v.state case 1: @@ -8681,7 +9366,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WriteObjectResponse); i { + switch v := v.(*WriteObjectRequest); i { case 0: return &v.state case 1: @@ -8693,7 +9378,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListObjectsRequest); i { + switch v := v.(*WriteObjectResponse); i { case 0: return &v.state case 1: @@ -8705,7 +9390,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryWriteStatusRequest); i { + switch v := v.(*BidiWriteObjectRequest); i { case 0: return &v.state case 1: @@ -8717,7 +9402,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryWriteStatusResponse); i { + switch v := v.(*BidiWriteObjectResponse); i { case 0: return &v.state case 1: @@ -8729,7 +9414,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[25].Exporter = func(v interface{}, i 
int) interface{} { - switch v := v.(*RewriteObjectRequest); i { + switch v := v.(*ListObjectsRequest); i { case 0: return &v.state case 1: @@ -8741,7 +9426,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RewriteResponse); i { + switch v := v.(*QueryWriteStatusRequest); i { case 0: return &v.state case 1: @@ -8753,7 +9438,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartResumableWriteRequest); i { + switch v := v.(*QueryWriteStatusResponse); i { case 0: return &v.state case 1: @@ -8765,7 +9450,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartResumableWriteResponse); i { + switch v := v.(*RewriteObjectRequest); i { case 0: return &v.state case 1: @@ -8777,7 +9462,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateObjectRequest); i { + switch v := v.(*RewriteResponse); i { case 0: return &v.state case 1: @@ -8789,7 +9474,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetServiceAccountRequest); i { + switch v := v.(*StartResumableWriteRequest); i { case 0: return &v.state case 1: @@ -8801,7 +9486,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateHmacKeyRequest); i { + switch v := v.(*StartResumableWriteResponse); i { case 0: return &v.state case 1: @@ -8813,7 +9498,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateHmacKeyResponse); i { + switch v := v.(*UpdateObjectRequest); i { case 0: return &v.state case 1: @@ -8825,7 +9510,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteHmacKeyRequest); i { + switch v := v.(*GetServiceAccountRequest); i { case 0: return &v.state case 1: @@ -8837,7 +9522,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetHmacKeyRequest); i { + switch v := v.(*CreateHmacKeyRequest); i { case 0: return &v.state case 1: @@ -8849,7 +9534,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListHmacKeysRequest); i { + switch v := v.(*CreateHmacKeyResponse); i { case 0: return &v.state case 1: @@ -8861,7 +9546,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListHmacKeysResponse); i { + switch v := v.(*DeleteHmacKeyRequest); i { case 0: return &v.state case 1: @@ -8873,7 +9558,7 @@ func file_google_storage_v2_storage_proto_init() { } } 
file_google_storage_v2_storage_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateHmacKeyRequest); i { + switch v := v.(*GetHmacKeyRequest); i { case 0: return &v.state case 1: @@ -8885,7 +9570,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CommonObjectRequestParams); i { + switch v := v.(*ListHmacKeysRequest); i { case 0: return &v.state case 1: @@ -8897,7 +9582,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceConstants); i { + switch v := v.(*ListHmacKeysResponse); i { case 0: return &v.state case 1: @@ -8909,7 +9594,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket); i { + switch v := v.(*UpdateHmacKeyRequest); i { case 0: return &v.state case 1: @@ -8921,7 +9606,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BucketAccessControl); i { + switch v := v.(*CommonObjectRequestParams); i { case 0: return &v.state case 1: @@ -8933,7 +9618,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ChecksummedData); i { + switch v := v.(*ServiceConstants); i { case 0: return &v.state case 1: @@ -8945,7 +9630,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ObjectChecksums); i { + switch v := v.(*Bucket); i { case 0: return &v.state case 1: @@ -8957,7 +9642,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HmacKeyMetadata); i { + switch v := v.(*BucketAccessControl); i { case 0: return &v.state case 1: @@ -8969,7 +9654,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NotificationConfig); i { + switch v := v.(*ChecksummedData); i { case 0: return &v.state case 1: @@ -8981,7 +9666,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CustomerEncryption); i { + switch v := v.(*ObjectChecksums); i { case 0: return &v.state case 1: @@ -8993,7 +9678,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Object); i { + switch v := v.(*HmacKeyMetadata); i { case 0: return &v.state case 1: @@ -9005,7 +9690,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ObjectAccessControl); i { + switch v := v.(*NotificationConfig); i { case 0: return &v.state case 1: @@ -9017,7 +9702,7 @@ func file_google_storage_v2_storage_proto_init() { } } 
file_google_storage_v2_storage_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListObjectsResponse); i { + switch v := v.(*CustomerEncryption); i { case 0: return &v.state case 1: @@ -9029,7 +9714,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProjectTeam); i { + switch v := v.(*Object); i { case 0: return &v.state case 1: @@ -9041,7 +9726,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServiceAccount); i { + switch v := v.(*ObjectAccessControl); i { case 0: return &v.state case 1: @@ -9053,7 +9738,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Owner); i { + switch v := v.(*ListObjectsResponse); i { case 0: return &v.state case 1: @@ -9065,7 +9750,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ContentRange); i { + switch v := v.(*ProjectTeam); i { case 0: return &v.state case 1: @@ -9077,7 +9762,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ComposeObjectRequest_SourceObject); i { + switch v := v.(*ServiceAccount); i { case 0: return &v.state case 1: @@ -9089,7 +9774,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ComposeObjectRequest_SourceObject_ObjectPreconditions); i { + switch v := v.(*Owner); i { case 0: return &v.state case 1: @@ -9101,7 +9786,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Billing); i { + switch v := v.(*ContentRange); i { case 0: return &v.state case 1: @@ -9113,7 +9798,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Cors); i { + switch v := v.(*ComposeObjectRequest_SourceObject); i { case 0: return &v.state case 1: @@ -9125,7 +9810,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Encryption); i { + switch v := v.(*ComposeObjectRequest_SourceObject_ObjectPreconditions); i { case 0: return &v.state case 1: @@ -9137,7 +9822,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_IamConfig); i { + switch v := v.(*Bucket_Billing); i { case 0: return &v.state case 1: @@ -9149,7 +9834,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Lifecycle); i { + switch v := v.(*Bucket_Cors); i { case 0: return &v.state case 1: @@ -9161,7 +9846,7 @@ func 
file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Logging); i { + switch v := v.(*Bucket_Encryption); i { case 0: return &v.state case 1: @@ -9173,7 +9858,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_RetentionPolicy); i { + switch v := v.(*Bucket_IamConfig); i { case 0: return &v.state case 1: @@ -9185,7 +9870,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Versioning); i { + switch v := v.(*Bucket_Lifecycle); i { case 0: return &v.state case 1: @@ -9197,7 +9882,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Website); i { + switch v := v.(*Bucket_Logging); i { case 0: return &v.state case 1: @@ -9209,7 +9894,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_CustomPlacementConfig); i { + switch v := v.(*Bucket_RetentionPolicy); i { case 0: return &v.state case 1: @@ -9221,7 +9906,19 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Autoclass); i { + switch v := v.(*Bucket_SoftDeletePolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Versioning); i { case 0: return &v.state case 1: @@ -9233,7 +9930,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_IamConfig_UniformBucketLevelAccess); i { + switch v := v.(*Bucket_Website); i { case 0: return &v.state case 1: @@ -9245,7 +9942,7 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Bucket_Lifecycle_Rule); i { + switch v := v.(*Bucket_CustomPlacementConfig); i { case 0: return &v.state case 1: @@ -9257,6 +9954,42 @@ func file_google_storage_v2_storage_proto_init() { } } file_google_storage_v2_storage_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Autoclass); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_IamConfig_UniformBucketLevelAccess); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Bucket_Lifecycle_Rule); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + 
default: + return nil + } + } + file_google_storage_v2_storage_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Bucket_Lifecycle_Rule_Action); i { case 0: return &v.state @@ -9268,7 +10001,7 @@ func file_google_storage_v2_storage_proto_init() { return nil } } - file_google_storage_v2_storage_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + file_google_storage_v2_storage_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Bucket_Lifecycle_Rule_Condition); i { case 0: return &v.state @@ -9287,37 +10020,49 @@ func file_google_storage_v2_storage_proto_init() { file_google_storage_v2_storage_proto_msgTypes[6].OneofWrappers = []interface{}{} file_google_storage_v2_storage_proto_msgTypes[12].OneofWrappers = []interface{}{} file_google_storage_v2_storage_proto_msgTypes[13].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[16].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[14].OneofWrappers = []interface{}{} file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[19].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []interface{}{ + file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[21].OneofWrappers = []interface{}{ (*WriteObjectRequest_UploadId)(nil), (*WriteObjectRequest_WriteObjectSpec)(nil), (*WriteObjectRequest_ChecksummedData)(nil), } - file_google_storage_v2_storage_proto_msgTypes[21].OneofWrappers = []interface{}{ + file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []interface{}{ (*WriteObjectResponse_PersistedSize)(nil), (*WriteObjectResponse_Resource)(nil), } - file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[23].OneofWrappers = []interface{}{ + (*BidiWriteObjectRequest_UploadId)(nil), + (*BidiWriteObjectRequest_WriteObjectSpec)(nil), + (*BidiWriteObjectRequest_ChecksummedData)(nil), + } file_google_storage_v2_storage_proto_msgTypes[24].OneofWrappers = []interface{}{ + (*BidiWriteObjectResponse_PersistedSize)(nil), + (*BidiWriteObjectResponse_Resource)(nil), + } + file_google_storage_v2_storage_proto_msgTypes[25].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []interface{}{ (*QueryWriteStatusResponse_PersistedSize)(nil), (*QueryWriteStatusResponse_Resource)(nil), } - file_google_storage_v2_storage_proto_msgTypes[25].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[29].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[42].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[43].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[47].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[55].OneofWrappers = []interface{}{} - file_google_storage_v2_storage_proto_msgTypes[71].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[28].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[32].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[45].OneofWrappers = []interface{}{} + 
file_google_storage_v2_storage_proto_msgTypes[46].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[50].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[58].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[66].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[70].OneofWrappers = []interface{}{} + file_google_storage_v2_storage_proto_msgTypes[75].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_storage_v2_storage_proto_rawDesc, NumEnums: 1, - NumMessages: 74, + NumMessages: 78, NumExtensions: 0, NumServices: 1, }, @@ -9356,19 +10101,19 @@ type StorageClient interface { LockBucketRetentionPolicy(ctx context.Context, in *LockBucketRetentionPolicyRequest, opts ...grpc.CallOption) (*Bucket, error) // Gets the IAM policy for a specified bucket or object. // The `resource` field in the request should be - // projects/_/buckets/ for a bucket or - // projects/_/buckets//objects/ for an object. + // `projects/_/buckets/{bucket}` for a bucket or + // `projects/_/buckets/{bucket}/objects/{object}` for an object. GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) // Updates an IAM policy for the specified bucket or object. // The `resource` field in the request should be - // projects/_/buckets/ for a bucket or - // projects/_/buckets//objects/ for an object. + // `projects/_/buckets/{bucket}` for a bucket or + // `projects/_/buckets/{bucket}/objects/{object}` for an object. SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) // Tests a set of permissions on the given bucket or object to see which, if // any, are held by the caller. // The `resource` field in the request should be - // projects/_/buckets/ for a bucket or - // projects/_/buckets//objects/ for an object. + // `projects/_/buckets/{bucket}` for a bucket or + // `projects/_/buckets/{bucket}/objects/{object}` for an object. TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) @@ -9393,6 +10138,8 @@ type StorageClient interface { // the bucket, deleted objects can be restored using RestoreObject until the // soft delete retention period has passed. DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Restores a soft-deleted object. + RestoreObject(ctx context.Context, in *RestoreObjectRequest, opts ...grpc.CallOption) (*Object, error) // Cancels an in-progress resumable upload. // // Any attempts to write to the resumable upload after cancelling the upload @@ -9465,7 +10212,26 @@ type StorageClient interface { // status, with a WriteObjectResponse containing the finalized object's // metadata. // + // Alternatively, the BidiWriteObject operation may be used to write an + // object with controls over flushing and the ability to fetch the ability to + // determine the current persisted size. WriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_WriteObjectClient, error) + // Stores a new object and metadata. 
+ // + // This is similar to the WriteObject call with the added support for + // manual flushing of persisted state, and the ability to determine current + // persisted size without closing the stream. + // + // The client may specify one or both of the `state_lookup` and `flush` fields + // in each BidiWriteObjectRequest. If `flush` is specified, the data written + // so far will be persisted to storage. If `state_lookup` is specified, the + // service will respond with a BidiWriteObjectResponse that contains the + // persisted size. If both `flush` and `state_lookup` are specified, the flush + // will always occur before a `state_lookup`, so that both may be set in the + // same request and the returned state will be the state of the object + // post-flush. When the stream is closed, a BidiWriteObjectResponse will + // always be sent to the client, regardless of the value of `state_lookup`. + BidiWriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiWriteObjectClient, error) // Retrieves a list of objects matching the criteria. ListObjects(ctx context.Context, in *ListObjectsRequest, opts ...grpc.CallOption) (*ListObjectsResponse, error) // Rewrites a source object to a destination object. Optionally overrides @@ -9646,6 +10412,15 @@ func (c *storageClient) DeleteObject(ctx context.Context, in *DeleteObjectReques return out, nil } +func (c *storageClient) RestoreObject(ctx context.Context, in *RestoreObjectRequest, opts ...grpc.CallOption) (*Object, error) { + out := new(Object) + err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/RestoreObject", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *storageClient) CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error) { out := new(CancelResumableWriteResponse) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CancelResumableWrite", in, out, opts...) @@ -9739,6 +10514,37 @@ func (x *storageWriteObjectClient) CloseAndRecv() (*WriteObjectResponse, error) return m, nil } +func (c *storageClient) BidiWriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiWriteObjectClient, error) { + stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[2], "/google.storage.v2.Storage/BidiWriteObject", opts...) + if err != nil { + return nil, err + } + x := &storageBidiWriteObjectClient{stream} + return x, nil +} + +type Storage_BidiWriteObjectClient interface { + Send(*BidiWriteObjectRequest) error + Recv() (*BidiWriteObjectResponse, error) + grpc.ClientStream +} + +type storageBidiWriteObjectClient struct { + grpc.ClientStream +} + +func (x *storageBidiWriteObjectClient) Send(m *BidiWriteObjectRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *storageBidiWriteObjectClient) Recv() (*BidiWriteObjectResponse, error) { + m := new(BidiWriteObjectResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + func (c *storageClient) ListObjects(ctx context.Context, in *ListObjectsRequest, opts ...grpc.CallOption) (*ListObjectsResponse, error) { out := new(ListObjectsResponse) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListObjects", in, out, opts...) @@ -9843,19 +10649,19 @@ type StorageServer interface { LockBucketRetentionPolicy(context.Context, *LockBucketRetentionPolicyRequest) (*Bucket, error) // Gets the IAM policy for a specified bucket or object. 
// The `resource` field in the request should be - // projects/_/buckets/ for a bucket or - // projects/_/buckets//objects/ for an object. + // `projects/_/buckets/{bucket}` for a bucket or + // `projects/_/buckets/{bucket}/objects/{object}` for an object. GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) // Updates an IAM policy for the specified bucket or object. // The `resource` field in the request should be - // projects/_/buckets/ for a bucket or - // projects/_/buckets//objects/ for an object. + // `projects/_/buckets/{bucket}` for a bucket or + // `projects/_/buckets/{bucket}/objects/{object}` for an object. SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) // Tests a set of permissions on the given bucket or object to see which, if // any, are held by the caller. // The `resource` field in the request should be - // projects/_/buckets/ for a bucket or - // projects/_/buckets//objects/ for an object. + // `projects/_/buckets/{bucket}` for a bucket or + // `projects/_/buckets/{bucket}/objects/{object}` for an object. TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) @@ -9880,6 +10686,8 @@ type StorageServer interface { // the bucket, deleted objects can be restored using RestoreObject until the // soft delete retention period has passed. DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) + // Restores a soft-deleted object. + RestoreObject(context.Context, *RestoreObjectRequest) (*Object, error) // Cancels an in-progress resumable upload. // // Any attempts to write to the resumable upload after cancelling the upload @@ -9952,7 +10760,26 @@ type StorageServer interface { // status, with a WriteObjectResponse containing the finalized object's // metadata. // + // Alternatively, the BidiWriteObject operation may be used to write an + // object with controls over flushing and the ability to fetch the ability to + // determine the current persisted size. WriteObject(Storage_WriteObjectServer) error + // Stores a new object and metadata. + // + // This is similar to the WriteObject call with the added support for + // manual flushing of persisted state, and the ability to determine current + // persisted size without closing the stream. + // + // The client may specify one or both of the `state_lookup` and `flush` fields + // in each BidiWriteObjectRequest. If `flush` is specified, the data written + // so far will be persisted to storage. If `state_lookup` is specified, the + // service will respond with a BidiWriteObjectResponse that contains the + // persisted size. If both `flush` and `state_lookup` are specified, the flush + // will always occur before a `state_lookup`, so that both may be set in the + // same request and the returned state will be the state of the object + // post-flush. When the stream is closed, a BidiWriteObjectResponse will + // always be sent to the client, regardless of the value of `state_lookup`. + BidiWriteObject(Storage_BidiWriteObjectServer) error // Retrieves a list of objects matching the criteria. ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error) // Rewrites a source object to a destination object. 
Optionally overrides @@ -10039,6 +10866,9 @@ func (*UnimplementedStorageServer) ComposeObject(context.Context, *ComposeObject func (*UnimplementedStorageServer) DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteObject not implemented") } +func (*UnimplementedStorageServer) RestoreObject(context.Context, *RestoreObjectRequest) (*Object, error) { + return nil, status.Errorf(codes.Unimplemented, "method RestoreObject not implemented") +} func (*UnimplementedStorageServer) CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CancelResumableWrite not implemented") } @@ -10054,6 +10884,9 @@ func (*UnimplementedStorageServer) UpdateObject(context.Context, *UpdateObjectRe func (*UnimplementedStorageServer) WriteObject(Storage_WriteObjectServer) error { return status.Errorf(codes.Unimplemented, "method WriteObject not implemented") } +func (*UnimplementedStorageServer) BidiWriteObject(Storage_BidiWriteObjectServer) error { + return status.Errorf(codes.Unimplemented, "method BidiWriteObject not implemented") +} func (*UnimplementedStorageServer) ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ListObjects not implemented") } @@ -10359,6 +11192,24 @@ func _Storage_DeleteObject_Handler(srv interface{}, ctx context.Context, dec fun return interceptor(ctx, in, info, handler) } +func _Storage_RestoreObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RestoreObjectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StorageServer).RestoreObject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.storage.v2.Storage/RestoreObject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StorageServer).RestoreObject(ctx, req.(*RestoreObjectRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Storage_CancelResumableWrite_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CancelResumableWriteRequest) if err := dec(in); err != nil { @@ -10460,6 +11311,32 @@ func (x *storageWriteObjectServer) Recv() (*WriteObjectRequest, error) { return m, nil } +func _Storage_BidiWriteObject_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(StorageServer).BidiWriteObject(&storageBidiWriteObjectServer{stream}) +} + +type Storage_BidiWriteObjectServer interface { + Send(*BidiWriteObjectResponse) error + Recv() (*BidiWriteObjectRequest, error) + grpc.ServerStream +} + +type storageBidiWriteObjectServer struct { + grpc.ServerStream +} + +func (x *storageBidiWriteObjectServer) Send(m *BidiWriteObjectResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *storageBidiWriteObjectServer) Recv() (*BidiWriteObjectRequest, error) { + m := new(BidiWriteObjectRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + func _Storage_ListObjects_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ListObjectsRequest) if err := dec(in); err != nil { 
@@ -10704,6 +11581,10 @@ var _Storage_serviceDesc = grpc.ServiceDesc{ MethodName: "DeleteObject", Handler: _Storage_DeleteObject_Handler, }, + { + MethodName: "RestoreObject", + Handler: _Storage_RestoreObject_Handler, + }, { MethodName: "CancelResumableWrite", Handler: _Storage_CancelResumableWrite_Handler, @@ -10768,6 +11649,12 @@ var _Storage_serviceDesc = grpc.ServiceDesc{ Handler: _Storage_WriteObject_Handler, ClientStreams: true, }, + { + StreamName: "BidiWriteObject", + Handler: _Storage_BidiWriteObject_Handler, + ServerStreams: true, + ClientStreams: true, + }, }, Metadata: "google/storage/v2/storage.proto", } diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go index acc180691..4a81ada09 100644 --- a/vendor/cloud.google.com/go/storage/internal/version.go +++ b/vendor/cloud.google.com/go/storage/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. -const Version = "1.32.0" +const Version = "1.34.1" diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go index d8f5a6d4a..dc79fd88b 100644 --- a/vendor/cloud.google.com/go/storage/invoke.go +++ b/vendor/cloud.google.com/go/storage/invoke.go @@ -20,7 +20,6 @@ import ( "fmt" "io" "net" - "net/http" "net/url" "strings" @@ -29,6 +28,7 @@ import ( sinternal "cloud.google.com/go/storage/internal" "github.com/google/uuid" gax "github.com/googleapis/gax-go/v2" + "github.com/googleapis/gax-go/v2/callctx" "google.golang.org/api/googleapi" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -37,10 +37,15 @@ import ( var defaultRetry *retryConfig = &retryConfig{} var xGoogDefaultHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), sinternal.Version) +const ( + xGoogHeaderKey = "x-goog-api-client" + idempotencyHeaderKey = "x-goog-gcs-idempotency-token" +) + // run determines whether a retry is necessary based on the config and // idempotency information. It then calls the function with or without retries // as appropriate, using the configured settings. -func run(ctx context.Context, call func() error, retry *retryConfig, isIdempotent bool, setHeader func(string, int)) error { +func run(ctx context.Context, call func(ctx context.Context) error, retry *retryConfig, isIdempotent bool) error { attempts := 1 invocationID := uuid.New().String() @@ -48,8 +53,8 @@ func run(ctx context.Context, call func() error, retry *retryConfig, isIdempoten retry = defaultRetry } if (retry.policy == RetryIdempotent && !isIdempotent) || retry.policy == RetryNever { - setHeader(invocationID, attempts) - return call() + ctxWithHeaders := setInvocationHeaders(ctx, invocationID, attempts) + return call(ctxWithHeaders) } bo := gax.Backoff{} if retry.backoff != nil { @@ -63,35 +68,22 @@ func run(ctx context.Context, call func() error, retry *retryConfig, isIdempoten } return internal.Retry(ctx, bo, func() (stop bool, err error) { - setHeader(invocationID, attempts) - err = call() + ctxWithHeaders := setInvocationHeaders(ctx, invocationID, attempts) + err = call(ctxWithHeaders) attempts++ return !errorFunc(err), err }) } -func setRetryHeaderHTTP(req interface{ Header() http.Header }) func(string, int) { - return func(invocationID string, attempts int) { - if req == nil { - return - } - header := req.Header() - // TODO(b/274504690): Consider dropping gccl-invocation-id key since it - // duplicates the X-Goog-Gcs-Idempotency-Token header (added in v1.31.0). 
- invocationHeader := fmt.Sprintf("gccl-invocation-id/%v gccl-attempt-count/%v", invocationID, attempts) - xGoogHeader := strings.Join([]string{invocationHeader, xGoogDefaultHeader}, " ") - header.Set("x-goog-api-client", xGoogHeader) - // Also use the invocationID for the idempotency token header, which will - // enable idempotent retries for more operations. - header.Set("x-goog-gcs-idempotency-token", invocationID) - } -} +// Sets invocation ID headers on the context which will be propagated as +// headers in the call to the service (for both gRPC and HTTP). +func setInvocationHeaders(ctx context.Context, invocationID string, attempts int) context.Context { + invocationHeader := fmt.Sprintf("gccl-invocation-id/%v gccl-attempt-count/%v", invocationID, attempts) + xGoogHeader := strings.Join([]string{invocationHeader, xGoogDefaultHeader}, " ") -// TODO: Implement method setting header via context for gRPC -func setRetryHeaderGRPC(_ context.Context) func(string, int) { - return func(_ string, _ int) { - return - } + ctx = callctx.SetHeaders(ctx, xGoogHeaderKey, xGoogHeader) + ctx = callctx.SetHeaders(ctx, idempotencyHeaderKey, invocationID) + return ctx } // ShouldRetry returns true if an error is retryable, based on best practice diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go index 580353053..4673a68d0 100644 --- a/vendor/cloud.google.com/go/storage/reader.go +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -187,16 +187,6 @@ func setConditionsHeaders(headers http.Header, conds *Conditions) error { return nil } -// Wrap a request to look similar to an apiary library request, in order to -// be used by run(). -type readerRequestWrapper struct { - req *http.Request -} - -func (w *readerRequestWrapper) Header() http.Header { - return w.req.Header -} - var emptyBody = ioutil.NopCloser(strings.NewReader("")) // Reader reads a Cloud Storage object. diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index 320439da5..a16e512f5 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -123,7 +123,7 @@ type Client struct { useGRPC bool } -// NewClient creates a new Google Cloud Storage client. +// NewClient creates a new Google Cloud Storage client using the HTTP transport. // The default scope is ScopeFullControl. To use a different scope, like // ScopeReadOnly, use option.WithScopes. // @@ -133,12 +133,6 @@ type Client struct { // You may configure the client by passing in options from the [google.golang.org/api/option] // package. You may also use options defined in this package, such as [WithJSONReads]. func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { - // Use the experimental gRPC client if the env var is set. - // This is an experimental API and not intended for public use. - if withGRPC := os.Getenv("STORAGE_USE_GRPC"); withGRPC != "" { - return newGRPCClient(ctx, opts...) - } - var creds *google.Credentials // In general, it is recommended to use raw.NewService instead of htransport.NewClient @@ -220,11 +214,20 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error }, nil } -// newGRPCClient creates a new Storage client that initializes a gRPC-based -// client. Calls that have not been implemented in gRPC will panic. +// NewGRPCClient creates a new Storage client using the gRPC transport and API. 
+// Client methods which have not been implemented in gRPC will return an error. +// In particular, methods for Cloud Pub/Sub notifications are not supported. // -// This is an experimental API and not intended for public use. -func newGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { +// The storage gRPC API is still in preview and not yet publicly available. +// If you would like to use the API, please first contact your GCP account rep to +// request access. The API may be subject to breaking changes. +// +// Clients should be reused instead of created as needed. The methods of Client +// are safe for concurrent use by multiple goroutines. +// +// You may configure the client by passing in options from the [google.golang.org/api/option] +// package. +func NewGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { opts = append(defaultGRPCOptions(), opts...) tc, err := newGRPCStorageClient(ctx, withClientOptions(opts...)) if err != nil { @@ -2187,8 +2190,6 @@ func toProjectResource(project string) string { // setConditionProtoField uses protobuf reflection to set named condition field // to the given condition value if supported on the protobuf message. -// -// This is an experimental API and not intended for public use. func setConditionProtoField(m protoreflect.Message, f string, v int64) bool { fields := m.Descriptor().Fields() if rf := fields.ByName(protoreflect.Name(f)); rf != nil { @@ -2201,8 +2202,6 @@ func setConditionProtoField(m protoreflect.Message, f string, v int64) bool { // applyCondsProto validates and attempts to set the conditions on a protobuf // message using protobuf reflection. -// -// This is an experimental API and not intended for public use. func applyCondsProto(method string, gen int64, conds *Conditions, msg proto.Message) error { rmsg := msg.ProtoReflect() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md index cd7c0da31..b618676c5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md @@ -1,5 +1,41 @@ # Release History +## 1.8.0 (2023-10-05) + +### Features Added + +* Added `Claims` and `EnableCAE` fields to `policy.TokenRequestOptions`. +* ARM bearer token policy handles CAE challenges. +* `messaging/CloudEvent` allows you to serialize/deserialize CloudEvents, as described in the CloudEvents 1.0 specification: [link](https://github.com/cloudevents/spec) +* Added functions `FetcherForNextLink` and `EncodeQueryParams` along with `FetcherForNextLinkOptions` to the `runtime` package to centralize creation of `Pager[T].Fetcher` from a next link URL. +* Added types `KeyCredential` and `SASCredential` to the `azcore` package. + * Includes their respective constructor functions. +* Added types `KeyCredentialPolicy` and `SASCredentialPolicy` to the `azcore/runtime` package. + * Includes their respective constructor functions and options types. + +### Breaking Changes +> These changes affect only code written against beta versions of `v1.8.0` +* The beta features for tracing and fakes have been omitted for this release. + +### Bugs Fixed + +* Fixed an issue that could cause some ARM RPs to not be automatically registered. +* Block bearer token authentication for non TLS protected endpoints. 
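Editor's illustration (not part of this patch): the `KeyCredential`/`SASCredential` types and their runtime policies listed under Features Added are designed to be composed into a client pipeline. A minimal sketch of how they might be wired together follows; the `x-api-key` header, module name, and version are assumptions for a hypothetical service.

```go
package example

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// newKeyAuthPipeline returns a pipeline that adds the key to every attempt via
// the new KeyCredentialPolicy. Header and module identifiers are placeholders.
func newKeyAuthPipeline(cred *azcore.KeyCredential, opts *policy.ClientOptions) runtime.Pipeline {
	auth := runtime.NewKeyCredentialPolicy(cred, "x-api-key", nil)
	return runtime.NewPipeline("example", "v0.1.0", runtime.PipelineOptions{
		PerRetry: []policy.Policy{auth},
	}, opts)
}

// get issues a request through the pipeline; the key header is added per retry.
func get(ctx context.Context, pl runtime.Pipeline, url string) (*http.Response, error) {
	req, err := runtime.NewRequest(ctx, http.MethodGet, url)
	if err != nil {
		return nil, err
	}
	return pl.Do(req)
}
```

Because the credential stores its key atomically, `cred.Update(newKey)` rotates the key at runtime and later requests pick up the new value without rebuilding the pipeline.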
+ +### Other Changes + +* The following functions in the `runtime` package are now exposed from the `policy` package, and the `runtime` versions have been deprecated. + * `WithCaptureResponse` + * `WithHTTPHeader` + * `WithRetryOptions` +* Updated dependencies. + +## 1.7.2 (2023-09-06) + +### Bugs Fixed + +* Fix default HTTP transport to work in WASM modules. + ## 1.7.1 (2023-08-14) ## Bugs Fixed diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go index 9fae2a9dc..9f051ba4a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/core.go @@ -22,6 +22,24 @@ type AccessToken = exported.AccessToken // TokenCredential represents a credential capable of providing an OAuth token. type TokenCredential = exported.TokenCredential +// KeyCredential contains an authentication key used to authenticate to an Azure service. +type KeyCredential = exported.KeyCredential + +// NewKeyCredential creates a new instance of [KeyCredential] with the specified values. +// - key is the authentication key +func NewKeyCredential(key string) *KeyCredential { + return exported.NewKeyCredential(key) +} + +// SASCredential contains a shared access signature used to authenticate to an Azure service. +type SASCredential = exported.SASCredential + +// NewSASCredential creates a new instance of [SASCredential] with the specified values. +// - sas is the shared access signature +func NewSASCredential(sas string) *SASCredential { + return exported.NewSASCredential(sas) +} + // holds sentinel values used to send nulls var nullables map[reflect.Type]interface{} = map[reflect.Type]interface{}{} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go index a1236b362..e793b31db 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go @@ -10,6 +10,7 @@ import ( "context" "io" "net/http" + "sync/atomic" "time" ) @@ -51,6 +52,17 @@ type AccessToken struct { // TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token. // Exported as policy.TokenRequestOptions. type TokenRequestOptions struct { + // Claims are any additional claims required for the token to satisfy a conditional access policy, such as a + // service may return in a claims challenge following an authorization failure. If a service returned the + // claims value base64 encoded, it must be decoded before setting this field. + Claims string + + // EnableCAE indicates whether to enable Continuous Access Evaluation (CAE) for the requested token. When true, + // azidentity credentials request CAE tokens for resource APIs supporting CAE. Clients are responsible for + // handling CAE challenges. If a client that doesn't handle CAE challenges receives a CAE token, it may end up + // in a loop retrying an API call with a token that has been revoked due to CAE. + EnableCAE bool + // Scopes contains the list of permission scopes required for the token. Scopes []string @@ -65,3 +77,65 @@ type TokenCredential interface { // GetToken requests an access token for the specified set of scopes. 
GetToken(ctx context.Context, options TokenRequestOptions) (AccessToken, error) } + +// KeyCredential contains an authentication key used to authenticate to an Azure service. +// Exported as azcore.KeyCredential. +type KeyCredential struct { + cred *keyCredential +} + +// NewKeyCredential creates a new instance of [KeyCredential] with the specified values. +// - key is the authentication key +func NewKeyCredential(key string) *KeyCredential { + return &KeyCredential{cred: newKeyCredential(key)} +} + +// Update replaces the existing key with the specified value. +func (k *KeyCredential) Update(key string) { + k.cred.Update(key) +} + +// SASCredential contains a shared access signature used to authenticate to an Azure service. +// Exported as azcore.SASCredential. +type SASCredential struct { + cred *keyCredential +} + +// NewSASCredential creates a new instance of [SASCredential] with the specified values. +// - sas is the shared access signature +func NewSASCredential(sas string) *SASCredential { + return &SASCredential{cred: newKeyCredential(sas)} +} + +// Update replaces the existing shared access signature with the specified value. +func (k *SASCredential) Update(sas string) { + k.cred.Update(sas) +} + +// KeyCredentialGet returns the key for cred. +func KeyCredentialGet(cred *KeyCredential) string { + return cred.cred.Get() +} + +// SASCredentialGet returns the shared access sig for cred. +func SASCredentialGet(cred *SASCredential) string { + return cred.cred.Get() +} + +type keyCredential struct { + key atomic.Value // string +} + +func newKeyCredential(key string) *keyCredential { + keyCred := keyCredential{} + keyCred.key.Store(key) + return &keyCred +} + +func (k *keyCredential) Get() string { + return k.key.Load().(string) +} + +func (k *keyCredential) Update(key string) { + k.key.Store(key) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go index 577435a49..05e53aa76 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go @@ -32,5 +32,5 @@ const ( Module = "azcore" // Version is the semantic version (see http://semver.org) of this module. - Version = "v1.7.1" + Version = "v1.8.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go index db0aaa7cb..1bf3aca91 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/shared.go @@ -14,6 +14,8 @@ import ( "regexp" "strconv" "time" + + "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" ) // CtxWithHTTPHeaderKey is used as a context key for adding/retrieving http.Header. @@ -22,8 +24,8 @@ type CtxWithHTTPHeaderKey struct{} // CtxWithRetryOptionsKey is used as a context key for adding/retrieving RetryOptions. type CtxWithRetryOptionsKey struct{} -// CtxIncludeResponseKey is used as a context key for retrieving the raw response. -type CtxIncludeResponseKey struct{} +// CtxWithCaptureResponse is used as a context key for retrieving the raw response. +type CtxWithCaptureResponse struct{} // Delay waits for the duration to elapse or the context to be cancelled. 
func Delay(ctx context.Context, delay time.Duration) error { @@ -101,3 +103,26 @@ func ExtractModuleName(clientName string) (string, string, error) { } return matches[3], matches[2], nil } + +// NonRetriableError marks the specified error as non-retriable. +func NonRetriableError(err error) error { + return &nonRetriableError{err} +} + +type nonRetriableError struct { + error +} + +func (p *nonRetriableError) Error() string { + return p.error.Error() +} + +func (*nonRetriableError) NonRetriable() { + // marker method +} + +func (p *nonRetriableError) Unwrap() error { + return p.error +} + +var _ errorinfo.NonRetriable = (*nonRetriableError)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go index b20004783..f73704cf0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go @@ -7,11 +7,13 @@ package policy import ( + "context" "net/http" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" ) @@ -162,3 +164,22 @@ type AuthorizationHandler struct { // the policy will return any 401 response to the client. OnChallenge func(*Request, *http.Response, func(TokenRequestOptions) error) error } + +// WithCaptureResponse applies the HTTP response retrieval annotation to the parent context. +// The resp parameter will contain the HTTP response after the request has completed. +func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context { + return context.WithValue(parent, shared.CtxWithCaptureResponse{}, resp) +} + +// WithHTTPHeader adds the specified http.Header to the parent context. +// Use this to specify custom HTTP headers at the API-call level. +// Any overlapping headers will have their values replaced with the values specified here. +func WithHTTPHeader(parent context.Context, header http.Header) context.Context { + return context.WithValue(parent, shared.CtxWithHTTPHeaderKey{}, header) +} + +// WithRetryOptions adds the specified RetryOptions to the parent context. +// Use this to specify custom RetryOptions at the API-call level. +func WithRetryOptions(parent context.Context, options RetryOptions) context.Context { + return context.WithValue(parent, shared.CtxWithRetryOptionsKey{}, options) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go index 5507665d6..8a2e6c61e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go @@ -10,6 +10,9 @@ import ( "context" "encoding/json" "errors" + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" ) // PagingHandler contains the required data for constructing a Pager. @@ -75,3 +78,41 @@ func (p *Pager[T]) NextPage(ctx context.Context) (T, error) { func (p *Pager[T]) UnmarshalJSON(data []byte) error { return json.Unmarshal(data, &p.current) } + +// FetcherForNextLinkOptions contains the optional values for [FetcherForNextLink]. +type FetcherForNextLinkOptions struct { + // NextReq is the func to be called when requesting subsequent pages. + // Used for paged operations that have a custom next link operation. 
+ NextReq func(context.Context, string) (*policy.Request, error) +} + +// FetcherForNextLink is a helper containing boilerplate code to simplify creating a PagingHandler[T].Fetcher from a next link URL. +// - ctx is the [context.Context] controlling the lifetime of the HTTP operation +// - pl is the [Pipeline] used to dispatch the HTTP request +// - nextLink is the URL used to fetch the next page. the empty string indicates the first page is to be requested +// - firstReq is the func to be called when creating the request for the first page +// - options contains any optional parameters, pass nil to accept the default values +func FetcherForNextLink(ctx context.Context, pl Pipeline, nextLink string, firstReq func(context.Context) (*policy.Request, error), options *FetcherForNextLinkOptions) (*http.Response, error) { + var req *policy.Request + var err error + if nextLink == "" { + req, err = firstReq(ctx) + } else if nextLink, err = EncodeQueryParams(nextLink); err == nil { + if options != nil && options.NextReq != nil { + req, err = options.NextReq(ctx, nextLink) + } else { + req, err = NewRequest(ctx, http.MethodGet, nextLink) + } + } + if err != nil { + return nil, err + } + resp, err := pl.Do(req) + if err != nil { + return nil, err + } + if !HasStatusCode(resp, http.StatusOK) { + return nil, NewResponseError(resp) + } + return resp, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go index b61e4c121..ff4931cd2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go @@ -6,6 +6,7 @@ package runtime import ( "errors" "net/http" + "strings" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" @@ -72,6 +73,9 @@ func (b *BearerTokenPolicy) authenticateAndAuthorize(req *policy.Request) func(p // Do authorizes a request with a bearer token func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) { + if strings.ToLower(req.Raw().URL.Scheme) != "https" { + return nil, shared.NonRetriableError(errors.New("bearer token authentication is not permitted for non TLS protected (https) endpoints")) + } var err error if b.authzHandler.OnRequest != nil { err = b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req)) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go index 770e0a2b6..c230af0af 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_header.go @@ -34,6 +34,7 @@ func httpHeaderPolicy(req *policy.Request) (*http.Response, error) { // WithHTTPHeader adds the specified http.Header to the parent context. // Use this to specify custom HTTP headers at the API-call level. // Any overlapping headers will have their values replaced with the values specified here. +// Deprecated: use [policy.WithHTTPHeader] instead. 
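Editor's illustration of the `FetcherForNextLink` helper added above, which removes the boilerplate from `Pager` fetchers. The endpoint and the `widgetsPage` shape (a `value` array plus a `nextLink` URL) are assumptions for the sketch, not an API defined in this patch.

```go
package example

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// widgetsPage is a hypothetical page shape for a list API that reports the
// URL of the next page in nextLink.
type widgetsPage struct {
	Value    []string `json:"value"`
	NextLink string   `json:"nextLink"`
}

// newWidgetsPager wires FetcherForNextLink into a Pager: the first page is
// requested from endpoint, subsequent pages from the nextLink in each response.
func newWidgetsPager(pl runtime.Pipeline, endpoint string) *runtime.Pager[widgetsPage] {
	return runtime.NewPager(runtime.PagingHandler[widgetsPage]{
		More: func(p widgetsPage) bool { return p.NextLink != "" },
		Fetcher: func(ctx context.Context, cur *widgetsPage) (widgetsPage, error) {
			nextLink := ""
			if cur != nil {
				nextLink = cur.NextLink
			}
			resp, err := runtime.FetcherForNextLink(ctx, pl, nextLink,
				func(ctx context.Context) (*policy.Request, error) {
					return runtime.NewRequest(ctx, http.MethodGet, endpoint)
				}, nil)
			if err != nil {
				return widgetsPage{}, err
			}
			var page widgetsPage
			if err := runtime.UnmarshalAsJSON(resp, &page); err != nil {
				return widgetsPage{}, err
			}
			return page, nil
		},
	})
}
```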
func WithHTTPHeader(parent context.Context, header http.Header) context.Context { - return context.WithValue(parent, shared.CtxWithHTTPHeaderKey{}, header) + return policy.WithHTTPHeader(parent, header) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go index 4714baa30..bb00f6c2f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_include_response.go @@ -20,7 +20,7 @@ func includeResponsePolicy(req *policy.Request) (*http.Response, error) { if resp == nil { return resp, err } - if httpOutRaw := req.Raw().Context().Value(shared.CtxIncludeResponseKey{}); httpOutRaw != nil { + if httpOutRaw := req.Raw().Context().Value(shared.CtxWithCaptureResponse{}); httpOutRaw != nil { httpOut := httpOutRaw.(**http.Response) *httpOut = resp } @@ -29,6 +29,7 @@ func includeResponsePolicy(req *policy.Request) (*http.Response, error) { // WithCaptureResponse applies the HTTP response retrieval annotation to the parent context. // The resp parameter will contain the HTTP response after the request has completed. +// Deprecated: use [policy.WithCaptureResponse] instead. func WithCaptureResponse(parent context.Context, resp **http.Response) context.Context { - return context.WithValue(parent, shared.CtxIncludeResponseKey{}, resp) + return policy.WithCaptureResponse(parent, resp) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go new file mode 100644 index 000000000..2e47a5bad --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_key_credential.go @@ -0,0 +1,49 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// KeyCredentialPolicy authorizes requests with a [azcore.KeyCredential]. +type KeyCredentialPolicy struct { + cred *exported.KeyCredential + header string + prefix string +} + +// KeyCredentialPolicyOptions contains the optional values configuring [KeyCredentialPolicy]. +type KeyCredentialPolicyOptions struct { + // Prefix is used if the key requires a prefix before it's inserted into the HTTP request. + Prefix string +} + +// NewKeyCredentialPolicy creates a new instance of [KeyCredentialPolicy]. +// - cred is the [azcore.KeyCredential] used to authenticate with the service +// - header is the name of the HTTP request header in which the key is placed +// - options contains optional configuration, pass nil to accept the default values +func NewKeyCredentialPolicy(cred *exported.KeyCredential, header string, options *KeyCredentialPolicyOptions) *KeyCredentialPolicy { + if options == nil { + options = &KeyCredentialPolicyOptions{} + } + return &KeyCredentialPolicy{ + cred: cred, + header: header, + prefix: options.Prefix, + } +} + +// Do implementes the Do method on the [policy.Polilcy] interface. 
+func (k *KeyCredentialPolicy) Do(req *policy.Request) (*http.Response, error) { + val := exported.KeyCredentialGet(k.cred) + if k.prefix != "" { + val = k.prefix + val + } + req.Raw().Header.Add(k.header, val) + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go index e0c5929f3..21fcb3968 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go @@ -209,8 +209,9 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) { // WithRetryOptions adds the specified RetryOptions to the parent context. // Use this to specify custom RetryOptions at the API-call level. +// Deprecated: use [policy.WithRetryOptions] instead. func WithRetryOptions(parent context.Context, options policy.RetryOptions) context.Context { - return context.WithValue(parent, shared.CtxWithRetryOptionsKey{}, options) + return policy.WithRetryOptions(parent, options) } // ********** The following type/methods implement the retryableRequestBody (a ReadSeekCloser) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go new file mode 100644 index 000000000..25266030b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_sas_credential.go @@ -0,0 +1,39 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// SASCredentialPolicy authorizes requests with a [azcore.SASCredential]. +type SASCredentialPolicy struct { + cred *exported.SASCredential + header string +} + +// SASCredentialPolicyOptions contains the optional values configuring [SASCredentialPolicy]. +type SASCredentialPolicyOptions struct { + // placeholder for future optional values +} + +// NewSASCredentialPolicy creates a new instance of [SASCredentialPolicy]. +// - cred is the [azcore.SASCredential] used to authenticate with the service +// - header is the name of the HTTP request header in which the shared access signature is placed +// - options contains optional configuration, pass nil to accept the default values +func NewSASCredentialPolicy(cred *exported.SASCredential, header string, options *SASCredentialPolicyOptions) *SASCredentialPolicy { + return &SASCredentialPolicy{ + cred: cred, + header: header, + } +} + +// Do implementes the Do method on the [policy.Polilcy] interface. 
+func (k *SASCredentialPolicy) Do(req *policy.Request) (*http.Response, error) { + req.Raw().Header.Add(k.header, exported.SASCredentialGet(k.cred)) + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go index 98e007184..b7e6fb26f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/request.go @@ -15,6 +15,7 @@ import ( "fmt" "io" "mime/multipart" + "net/url" "os" "path" "reflect" @@ -44,6 +45,19 @@ func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*polic return exported.NewRequest(ctx, httpMethod, endpoint) } +// EncodeQueryParams will parse and encode any query parameters in the specified URL. +func EncodeQueryParams(u string) (string, error) { + before, after, found := strings.Cut(u, "?") + if !found { + return u, nil + } + qp, err := url.ParseQuery(after) + if err != nil { + return "", err + } + return before + "?" + qp.Encode(), nil +} + // JoinPaths concatenates multiple URL path segments into one path, // inserting path separation characters as required. JoinPaths will preserve // query parameters in the root path diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go new file mode 100644 index 000000000..1c75d771f --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_other.go @@ -0,0 +1,15 @@ +//go:build !wasm + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "net" +) + +func defaultTransportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) { + return dialer.DialContext +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go new file mode 100644 index 000000000..3dc9eeecd --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_dialer_wasm.go @@ -0,0 +1,15 @@ +//go:build (js && wasm) || wasip1 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import ( + "context" + "net" +) + +func defaultTransportDialContext(dialer *net.Dialer) func(context.Context, string, string) (net.Conn, error) { + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go index dbb9fa7f8..589d09f2c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/transport_default_http_client.go @@ -18,10 +18,10 @@ var defaultHTTPClient *http.Client func init() { defaultTransport := &http.Transport{ Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ + DialContext: defaultTransportDialContext(&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, - }).DialContext, + }), ForceAttemptHTTP2: true, MaxIdleConns: 100, IdleConnTimeout: 90 * time.Second, diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index ddb24d810..7ea119ab3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,10 +1,53 @@ # Release History +## 1.4.0 (2023-10-10) + +### Bugs Fixed +* `ManagedIdentityCredential` will now retry when IMDS responds 410 or 503 + +## 1.4.0-beta.5 (2023-09-12) + +### Features Added +* Service principal credentials can request CAE tokens + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.4.0-beta.4 +* Whether `GetToken` requests a CAE token is now determined by `TokenRequestOptions.EnableCAE`. Azure + SDK clients which support CAE will set this option automatically. Credentials no longer request CAE + tokens by default or observe the environment variable "AZURE_IDENTITY_DISABLE_CP1". 
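Editor's illustration (not part of the patch): under the model described above, a caller that handles claims challenges itself opts in to CAE through `TokenRequestOptions.EnableCAE`. The credential and the ARM scope below are assumptions chosen for the sketch.

```go
package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// getCAEToken requests a CAE-capable token. Callers setting EnableCAE must be
// prepared to handle claims challenges returned by the resource API.
func getCAEToken(ctx context.Context) error {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		return err
	}
	tk, err := cred.GetToken(ctx, policy.TokenRequestOptions{
		Scopes:    []string{"https://management.azure.com/.default"},
		EnableCAE: true, // request a Continuous Access Evaluation token
	})
	if err != nil {
		return err
	}
	fmt.Println("token expires:", tk.ExpiresOn)
	return nil
}
```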
+ +### Bugs Fixed +* Credential chains such as `DefaultAzureCredential` now try their next credential, if any, when + managed identity authentication fails in a Docker Desktop container + ([#21417](https://github.com/Azure/azure-sdk-for-go/issues/21417)) + +## 1.4.0-beta.4 (2023-08-16) + +### Other Changes +* Upgraded dependencies + ## 1.3.1 (2023-08-16) ### Other Changes * Upgraded dependencies +## 1.4.0-beta.3 (2023-08-08) + +### Bugs Fixed +* One invocation of `AzureCLICredential.GetToken()` and `OnBehalfOfCredential.GetToken()` + can no longer make two authentication attempts + +## 1.4.0-beta.2 (2023-07-14) + +### Other Changes +* `DefaultAzureCredentialOptions.TenantID` applies to workload identity authentication +* Upgraded dependencies + +## 1.4.0-beta.1 (2023-06-06) + +### Other Changes +* Re-enabled CAE support as in v1.3.0-beta.3 + ## 1.3.0 (2023-05-09) ### Breaking Changes diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md index 7b7515eba..fef099813 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -76,12 +76,14 @@ azlog.SetListener(func(event azlog.Event, s string) { azlog.SetEvents(azidentity.EventAuthentication) ``` + ## Troubleshoot DefaultAzureCredential authentication issues | Error |Description| Mitigation | |---|---|---| |"DefaultAzureCredential failed to acquire a token"|No credential in the `DefaultAzureCredential` chain provided a token|
• [Enable logging](#enable-and-configure-logging) to get further diagnostic information. • Consult the troubleshooting guide for underlying credential types for more information: [EnvironmentCredential](#troubleshoot-environmentcredential-authentication-issues), [ManagedIdentityCredential](#troubleshoot-managedidentitycredential-authentication-issues), [AzureCLICredential](#troubleshoot-azureclicredential-authentication-issues)|
|Error from the client with a status code of 401 or 403|Authentication succeeded but the authorizing Azure service responded with a 401 (Unauthorized), or 403 (Forbidden) status code|• [Enable logging](#enable-and-configure-logging) to determine which credential in the chain returned the authenticating token. • If an unexpected credential is returning a token, check application configuration such as environment variables. • Ensure the correct role is assigned to the authenticated identity. For example, a service specific role rather than the subscription Owner role.
    | +|"managed identity timed out"|`DefaultAzureCredential` sets a short timeout on its first managed identity authentication attempt to prevent very long timeouts during local development when no managed identity is available. That timeout causes this error in production when an application requests a token before the hosting environment is ready to provide one.|Use [ManagedIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential) directly, at least in production. It doesn't set a timeout on its authentication attempts.| ## Troubleshoot EnvironmentCredential authentication issues diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go index 739ff49c1..10b742ce1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -10,12 +10,12 @@ import ( "bytes" "context" "errors" + "fmt" "io" "net/http" "net/url" "os" "regexp" - "strings" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" @@ -41,65 +41,18 @@ const ( organizationsTenantID = "organizations" developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46" defaultSuffix = "/.default" - tenantIDValidationErr = "invalid tenantID. You can locate your tenantID by following the instructions listed here: https://docs.microsoft.com/partner-center/find-ids-and-domain-names" ) var ( // capability CP1 indicates the client application is capable of handling CAE claims challenges - cp1 = []string{"CP1"} - // CP1 is disabled until CAE support is added back - disableCP1 = true + cp1 = []string{"CP1"} + errInvalidTenantID = errors.New("invalid tenantID. You can locate your tenantID by following the instructions listed here: https://learn.microsoft.com/partner-center/find-ids-and-domain-names") ) -var getConfidentialClient = func(clientID, tenantID string, cred confidential.Credential, co *azcore.ClientOptions, additionalOpts ...confidential.Option) (confidentialClient, error) { - if !validTenantID(tenantID) { - return confidential.Client{}, errors.New(tenantIDValidationErr) - } - authorityHost, err := setAuthorityHost(co.Cloud) - if err != nil { - return confidential.Client{}, err - } - authority := runtime.JoinPaths(authorityHost, tenantID) - o := []confidential.Option{ - confidential.WithAzureRegion(os.Getenv(azureRegionalAuthorityName)), - confidential.WithHTTPClient(newPipelineAdapter(co)), - } - if !disableCP1 { - o = append(o, confidential.WithClientCapabilities(cp1)) - } - o = append(o, additionalOpts...) - if strings.ToLower(tenantID) == "adfs" { - o = append(o, confidential.WithInstanceDiscovery(false)) - } - return confidential.New(authority, clientID, cred, o...) -} - -var getPublicClient = func(clientID, tenantID string, co *azcore.ClientOptions, additionalOpts ...public.Option) (public.Client, error) { - if !validTenantID(tenantID) { - return public.Client{}, errors.New(tenantIDValidationErr) - } - authorityHost, err := setAuthorityHost(co.Cloud) - if err != nil { - return public.Client{}, err - } - o := []public.Option{ - public.WithAuthority(runtime.JoinPaths(authorityHost, tenantID)), - public.WithHTTPClient(newPipelineAdapter(co)), - } - if !disableCP1 { - o = append(o, public.WithClientCapabilities(cp1)) - } - o = append(o, additionalOpts...) 
- if strings.ToLower(tenantID) == "adfs" { - o = append(o, public.WithInstanceDiscovery(false)) - } - return public.New(clientID, o...) -} - // setAuthorityHost initializes the authority host for credentials. Precedence is: -// 1. cloud.Configuration.ActiveDirectoryAuthorityHost value set by user -// 2. value of AZURE_AUTHORITY_HOST -// 3. default: Azure Public Cloud +// 1. cloud.Configuration.ActiveDirectoryAuthorityHost value set by user +// 2. value of AZURE_AUTHORITY_HOST +// 3. default: Azure Public Cloud func setAuthorityHost(cc cloud.Configuration) (string, error) { host := cc.ActiveDirectoryAuthorityHost if host == "" { @@ -121,6 +74,41 @@ func setAuthorityHost(cc cloud.Configuration) (string, error) { return host, nil } +// resolveAdditionalTenants returns a copy of tenants, simplified when tenants contains a wildcard +func resolveAdditionalTenants(tenants []string) []string { + if len(tenants) == 0 { + return nil + } + for _, t := range tenants { + // a wildcard makes all other values redundant + if t == "*" { + return []string{"*"} + } + } + cp := make([]string, len(tenants)) + copy(cp, tenants) + return cp +} + +// resolveTenant returns the correct tenant for a token request +func resolveTenant(defaultTenant, specified, credName string, additionalTenants []string) (string, error) { + if specified == "" || specified == defaultTenant { + return defaultTenant, nil + } + if defaultTenant == "adfs" { + return "", errors.New("ADFS doesn't support tenants") + } + if !validTenantID(specified) { + return "", errInvalidTenantID + } + for _, t := range additionalTenants { + if t == "*" || t == specified { + return specified, nil + } + } + return "", fmt.Errorf(`%s isn't configured to acquire tokens for tenant %q. To enable acquiring tokens for this tenant add it to the AdditionallyAllowedTenants on the credential options, or add "*" to allow acquiring tokens for any tenant`, credName, specified) +} + // validTenantID return true is it receives a valid tenantID, returns false otherwise func validTenantID(tenantID string) bool { match, err := regexp.MatchString("^[0-9a-zA-Z-.]+$", tenantID) @@ -173,7 +161,7 @@ func (p pipelineAdapter) Do(r *http.Request) (*http.Response, error) { } // enables fakes for test scenarios -type confidentialClient interface { +type msalConfidentialClient interface { AcquireTokenSilent(ctx context.Context, scopes []string, options ...confidential.AcquireSilentOption) (confidential.AuthResult, error) AcquireTokenByAuthCode(ctx context.Context, code string, redirectURI string, scopes []string, options ...confidential.AcquireByAuthCodeOption) (confidential.AuthResult, error) AcquireTokenByCredential(ctx context.Context, scopes []string, options ...confidential.AcquireByCredentialOption) (confidential.AuthResult, error) @@ -181,7 +169,7 @@ type confidentialClient interface { } // enables fakes for test scenarios -type publicClient interface { +type msalPublicClient interface { AcquireTokenSilent(ctx context.Context, scopes []string, options ...public.AcquireSilentOption) (public.AuthResult, error) AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username string, password string, options ...public.AcquireByUsernamePasswordOption) (public.AuthResult, error) AcquireTokenByDeviceCode(ctx context.Context, scopes []string, options ...public.AcquireByDeviceCodeOption) (public.DeviceCode, error) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go index 33ff13c09..55a0d6543 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go @@ -17,10 +17,12 @@ import ( "regexp" "runtime" "strings" + "sync" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" ) const ( @@ -47,14 +49,14 @@ type AzureCLICredentialOptions struct { // init returns an instance of AzureCLICredentialOptions initialized with default values. func (o *AzureCLICredentialOptions) init() { if o.tokenProvider == nil { - o.tokenProvider = defaultTokenProvider() + o.tokenProvider = defaultTokenProvider } } // AzureCLICredential authenticates as the identity logged in to the Azure CLI. type AzureCLICredential struct { - s *syncer - tokenProvider azureCLITokenProvider + mu *sync.Mutex + opts AzureCLICredentialOptions } // NewAzureCLICredential constructs an AzureCLICredential. Pass nil to accept default options. @@ -64,9 +66,8 @@ func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredent cp = *options } cp.init() - c := AzureCLICredential{tokenProvider: cp.tokenProvider} - c.s = newSyncer(credNameAzureCLI, cp.TenantID, cp.AdditionallyAllowedTenants, c.requestToken, c.requestToken) - return &c, nil + cp.AdditionallyAllowedTenants = resolveAdditionalTenants(cp.AdditionallyAllowedTenants) + return &AzureCLICredential{mu: &sync.Mutex{}, opts: cp}, nil } // GetToken requests a token from the Azure CLI. This credential doesn't cache tokens, so every call invokes the CLI. @@ -75,13 +76,15 @@ func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequ if len(opts.Scopes) != 1 { return azcore.AccessToken{}, errors.New(credNameAzureCLI + ": GetToken() requires exactly one scope") } - // CLI expects an AAD v1 resource, not a v2 scope + tenant, err := resolveTenant(c.opts.TenantID, opts.TenantID, credNameAzureCLI, c.opts.AdditionallyAllowedTenants) + if err != nil { + return azcore.AccessToken{}, err + } + // pass the CLI an AAD v1 resource because we don't know which CLI version is installed and older ones don't support v2 scopes opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} - return c.s.GetToken(ctx, opts) -} - -func (c *AzureCLICredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - b, err := c.tokenProvider(ctx, opts.Scopes[0], opts.TenantID) + c.mu.Lock() + defer c.mu.Unlock() + b, err := c.opts.tokenProvider(ctx, opts.Scopes[0], tenant) if err != nil { return azcore.AccessToken{}, err } @@ -89,61 +92,61 @@ func (c *AzureCLICredential) requestToken(ctx context.Context, opts policy.Token if err != nil { return azcore.AccessToken{}, err } + msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", credNameAzureCLI, strings.Join(opts.Scopes, ", ")) + log.Write(EventAuthentication, msg) return at, nil } -func defaultTokenProvider() func(ctx context.Context, resource string, tenantID string) ([]byte, error) { - return func(ctx context.Context, resource string, tenantID string) ([]byte, error) { - match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource) - if err != nil { - return nil, err - } - if !match { - return nil, fmt.Errorf(`%s: unexpected scope "%s". 
Only alphanumeric characters and ".", ";", "-", and "/" are allowed`, credNameAzureCLI, resource) - } - - // set a default timeout for this authentication iff the application hasn't done so already - var cancel context.CancelFunc - if _, hasDeadline := ctx.Deadline(); !hasDeadline { - ctx, cancel = context.WithTimeout(ctx, timeoutCLIRequest) - defer cancel() - } - - commandLine := "az account get-access-token -o json --resource " + resource - if tenantID != "" { - commandLine += " --tenant " + tenantID - } - var cliCmd *exec.Cmd - if runtime.GOOS == "windows" { - dir := os.Getenv("SYSTEMROOT") - if dir == "" { - return nil, newCredentialUnavailableError(credNameAzureCLI, "environment variable 'SYSTEMROOT' has no value") - } - cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine) - cliCmd.Dir = dir - } else { - cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine) - cliCmd.Dir = "/bin" - } - cliCmd.Env = os.Environ() - var stderr bytes.Buffer - cliCmd.Stderr = &stderr - - output, err := cliCmd.Output() - if err != nil { - msg := stderr.String() - var exErr *exec.ExitError - if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'az' is not recognized") { - msg = "Azure CLI not found on path" - } - if msg == "" { - msg = err.Error() - } - return nil, newCredentialUnavailableError(credNameAzureCLI, msg) - } - - return output, nil +var defaultTokenProvider azureCLITokenProvider = func(ctx context.Context, resource string, tenantID string) ([]byte, error) { + match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource) + if err != nil { + return nil, err } + if !match { + return nil, fmt.Errorf(`%s: unexpected scope "%s". Only alphanumeric characters and ".", ";", "-", and "/" are allowed`, credNameAzureCLI, resource) + } + + // set a default timeout for this authentication iff the application hasn't done so already + var cancel context.CancelFunc + if _, hasDeadline := ctx.Deadline(); !hasDeadline { + ctx, cancel = context.WithTimeout(ctx, timeoutCLIRequest) + defer cancel() + } + + commandLine := "az account get-access-token -o json --resource " + resource + if tenantID != "" { + commandLine += " --tenant " + tenantID + } + var cliCmd *exec.Cmd + if runtime.GOOS == "windows" { + dir := os.Getenv("SYSTEMROOT") + if dir == "" { + return nil, newCredentialUnavailableError(credNameAzureCLI, "environment variable 'SYSTEMROOT' has no value") + } + cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine) + cliCmd.Dir = dir + } else { + cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine) + cliCmd.Dir = "/bin" + } + cliCmd.Env = os.Environ() + var stderr bytes.Buffer + cliCmd.Stderr = &stderr + + output, err := cliCmd.Output() + if err != nil { + msg := stderr.String() + var exErr *exec.ExitError + if errors.As(err, &exErr) && exErr.ExitCode() == 127 || strings.HasPrefix(msg, "'az' is not recognized") { + msg = "Azure CLI not found on path" + } + if msg == "" { + msg = err.Error() + } + return nil, newCredentialUnavailableError(credNameAzureCLI, msg) + } + + return output, nil } func (c *AzureCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml index 3b443e8ee..9002ea0b0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml @@ -26,22 +26,9 @@ stages: parameters: RunLiveTests: true 
ServiceDirectory: 'azidentity' - PreSteps: - - pwsh: | - [System.Convert]::FromBase64String($env:PFX_CONTENTS) | Set-Content -Path $(Agent.TempDirectory)/test.pfx -AsByteStream - Set-Content -Path $(Agent.TempDirectory)/test.pem -Value $env:PEM_CONTENTS - [System.Convert]::FromBase64String($env:SNI_CONTENTS) | Set-Content -Path $(Agent.TempDirectory)/testsni.pfx -AsByteStream - env: - PFX_CONTENTS: $(net-identity-spcert-pfx) - PEM_CONTENTS: $(net-identity-spcert-pem) - SNI_CONTENTS: $(net-identity-spcert-sni) - EnvVars: - AZURE_IDENTITY_TEST_TENANTID: $(net-identity-tenantid) - AZURE_IDENTITY_TEST_USERNAME: $(net-identity-username) - AZURE_IDENTITY_TEST_PASSWORD: $(net-identity-password) - IDENTITY_SP_TENANT_ID: $(net-identity-sp-tenantid) - IDENTITY_SP_CLIENT_ID: $(net-identity-sp-clientid) - IDENTITY_SP_CLIENT_SECRET: $(net-identity-sp-clientsecret) - IDENTITY_SP_CERT_PEM: $(Agent.TempDirectory)/test.pem - IDENTITY_SP_CERT_PFX: $(Agent.TempDirectory)/test.pfx - IDENTITY_SP_CERT_SNI: $(Agent.TempDirectory)/testsni.pfx + CloudConfig: + Public: + SubscriptionConfigurations: + - $(sub-config-azure-cloud-test-resources) + # Contains alternate tenant, AAD app and cert info for testing + - $(sub-config-identity-test-resources) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go index d9d22996c..303d5fc09 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go @@ -24,8 +24,7 @@ const credNameAssertion = "ClientAssertionCredential" // // [Azure AD documentation]: https://docs.microsoft.com/azure/active-directory/develop/active-directory-certificate-credentials#assertion-format type ClientAssertionCredential struct { - client confidentialClient - s *syncer + client *confidentialClient } // ClientAssertionCredentialOptions contains optional parameters for ClientAssertionCredential. @@ -56,28 +55,21 @@ func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(c return getAssertion(ctx) }, ) - c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) + msalOpts := confidentialClientOptions{ + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + c, err := newConfidentialClient(tenantID, clientID, credNameAssertion, cred, msalOpts) if err != nil { return nil, err } - cac := ClientAssertionCredential{client: c} - cac.s = newSyncer(credNameAssertion, tenantID, options.AdditionallyAllowedTenants, cac.requestToken, cac.silentAuth) - return &cac, nil + return &ClientAssertionCredential{client: c}, nil } // GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. 
func (c *ClientAssertionCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.s.GetToken(ctx, opts) -} - -func (c *ClientAssertionCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err -} - -func (c *ClientAssertionCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + return c.client.GetToken(ctx, opts) } var _ azcore.TokenCredential = (*ClientAssertionCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go index 804eba899..d3300e305 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go @@ -42,8 +42,7 @@ type ClientCertificateCredentialOptions struct { // ClientCertificateCredential authenticates a service principal with a certificate. type ClientCertificateCredential struct { - client confidentialClient - s *syncer + client *confidentialClient } // NewClientCertificateCredential constructs a ClientCertificateCredential. Pass nil for options to accept defaults. @@ -58,33 +57,22 @@ func NewClientCertificateCredential(tenantID string, clientID string, certs []*x if err != nil { return nil, err } - var o []confidential.Option - if options.SendCertificateChain { - o = append(o, confidential.WithX5C()) + msalOpts := confidentialClientOptions{ + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + SendX5C: options.SendCertificateChain, } - o = append(o, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) - c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, o...) + c, err := newConfidentialClient(tenantID, clientID, credNameCert, cred, msalOpts) if err != nil { return nil, err } - cc := ClientCertificateCredential{client: c} - cc.s = newSyncer(credNameCert, tenantID, options.AdditionallyAllowedTenants, cc.requestToken, cc.silentAuth) - return &cc, nil + return &ClientCertificateCredential{client: c}, nil } // GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. 
func (c *ClientCertificateCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.s.GetToken(ctx, opts) -} - -func (c *ClientCertificateCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err -} - -func (c *ClientCertificateCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + return c.client.GetToken(ctx, opts) } // ParseCertificates loads certificates and a private key, in PEM or PKCS12 format, for use with NewClientCertificateCredential. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go index dda21f6b8..d2ff7582b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go @@ -33,8 +33,7 @@ type ClientSecretCredentialOptions struct { // ClientSecretCredential authenticates an application with a client secret. type ClientSecretCredential struct { - client confidentialClient - s *syncer + client *confidentialClient } // NewClientSecretCredential constructs a ClientSecretCredential. Pass nil for options to accept defaults. @@ -46,30 +45,21 @@ func NewClientSecretCredential(tenantID string, clientID string, clientSecret st if err != nil { return nil, err } - c, err := getConfidentialClient( - clientID, tenantID, cred, &options.ClientOptions, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery), - ) + msalOpts := confidentialClientOptions{ + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + } + c, err := newConfidentialClient(tenantID, clientID, credNameSecret, cred, msalOpts) if err != nil { return nil, err } - csc := ClientSecretCredential{client: c} - csc.s = newSyncer(credNameSecret, tenantID, options.AdditionallyAllowedTenants, csc.requestToken, csc.silentAuth) - return &csc, nil + return &ClientSecretCredential{c}, nil } // GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. 
func (c *ClientSecretCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.s.GetToken(ctx, opts) -} - -func (c *ClientSecretCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err -} - -func (c *ClientSecretCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes, confidential.WithTenantID(opts.TenantID)) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + return c.client.GetToken(ctx, opts) } var _ azcore.TokenCredential = (*ClientSecretCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go new file mode 100644 index 000000000..4853a9a00 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go @@ -0,0 +1,156 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "errors" + "fmt" + "os" + "strings" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" +) + +type confidentialClientOptions struct { + azcore.ClientOptions + + AdditionallyAllowedTenants []string + // Assertion for on-behalf-of authentication + Assertion string + DisableInstanceDiscovery, SendX5C bool +} + +// confidentialClient wraps the MSAL confidential client +type confidentialClient struct { + cae, noCAE msalConfidentialClient + caeMu, noCAEMu, clientMu *sync.Mutex + clientID, tenantID string + cred confidential.Credential + host string + name string + opts confidentialClientOptions + region string +} + +func newConfidentialClient(tenantID, clientID, name string, cred confidential.Credential, opts confidentialClientOptions) (*confidentialClient, error) { + if !validTenantID(tenantID) { + return nil, errInvalidTenantID + } + host, err := setAuthorityHost(opts.Cloud) + if err != nil { + return nil, err + } + opts.AdditionallyAllowedTenants = resolveAdditionalTenants(opts.AdditionallyAllowedTenants) + return &confidentialClient{ + caeMu: &sync.Mutex{}, + clientID: clientID, + clientMu: &sync.Mutex{}, + cred: cred, + host: host, + name: name, + noCAEMu: &sync.Mutex{}, + opts: opts, + region: os.Getenv(azureRegionalAuthorityName), + tenantID: tenantID, + }, nil +} + +// GetToken requests an access token from MSAL, checking the cache first. 
+func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(tro.Scopes) < 1 { + return azcore.AccessToken{}, fmt.Errorf("%s.GetToken() requires at least one scope", c.name) + } + // we don't resolve the tenant for managed identities because they acquire tokens only from their home tenants + if c.name != credNameManagedIdentity { + tenant, err := c.resolveTenant(tro.TenantID) + if err != nil { + return azcore.AccessToken{}, err + } + tro.TenantID = tenant + } + client, mu, err := c.client(ctx, tro) + if err != nil { + return azcore.AccessToken{}, err + } + mu.Lock() + defer mu.Unlock() + var ar confidential.AuthResult + if c.opts.Assertion != "" { + ar, err = client.AcquireTokenOnBehalfOf(ctx, c.opts.Assertion, tro.Scopes, confidential.WithClaims(tro.Claims), confidential.WithTenantID(tro.TenantID)) + } else { + ar, err = client.AcquireTokenSilent(ctx, tro.Scopes, confidential.WithClaims(tro.Claims), confidential.WithTenantID(tro.TenantID)) + if err != nil { + ar, err = client.AcquireTokenByCredential(ctx, tro.Scopes, confidential.WithClaims(tro.Claims), confidential.WithTenantID(tro.TenantID)) + } + } + if err != nil { + // We could get a credentialUnavailableError from managed identity authentication because in that case the error comes from our code. + // We return it directly because it affects the behavior of credential chains. Otherwise, we return AuthenticationFailedError. + var unavailableErr *credentialUnavailableError + if !errors.As(err, &unavailableErr) { + res := getResponseFromError(err) + err = newAuthenticationFailedError(c.name, err.Error(), res, err) + } + } else { + msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", c.name, strings.Join(ar.GrantedScopes, ", ")) + log.Write(EventAuthentication, msg) + } + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +func (c *confidentialClient) client(ctx context.Context, tro policy.TokenRequestOptions) (msalConfidentialClient, *sync.Mutex, error) { + c.clientMu.Lock() + defer c.clientMu.Unlock() + if tro.EnableCAE { + if c.cae == nil { + client, err := c.newMSALClient(true) + if err != nil { + return nil, nil, err + } + c.cae = client + } + return c.cae, c.caeMu, nil + } + if c.noCAE == nil { + client, err := c.newMSALClient(false) + if err != nil { + return nil, nil, err + } + c.noCAE = client + } + return c.noCAE, c.noCAEMu, nil +} + +func (c *confidentialClient) newMSALClient(enableCAE bool) (msalConfidentialClient, error) { + authority := runtime.JoinPaths(c.host, c.tenantID) + o := []confidential.Option{ + confidential.WithAzureRegion(c.region), + confidential.WithHTTPClient(newPipelineAdapter(&c.opts.ClientOptions)), + } + if enableCAE { + o = append(o, confidential.WithClientCapabilities(cp1)) + } + if c.opts.SendX5C { + o = append(o, confidential.WithX5C()) + } + if c.opts.DisableInstanceDiscovery || strings.ToLower(c.tenantID) == "adfs" { + o = append(o, confidential.WithInstanceDiscovery(false)) + } + return confidential.New(authority, c.clientID, c.cred, o...) 
+} + +// resolveTenant returns the correct tenant for a token request given the client's +// configuration, or an error when that configuration doesn't allow the specified tenant +func (c *confidentialClient) resolveTenant(specified string) (string, error) { + return resolveTenant(c.tenantID, specified, c.name, c.opts.AdditionallyAllowedTenants) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go index 1e3efdc97..7647c60b1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -21,6 +21,8 @@ import ( // DefaultAzureCredentialOptions contains optional parameters for DefaultAzureCredential. // These options may not apply to all credentials in the chain. type DefaultAzureCredentialOptions struct { + // ClientOptions has additional options for credentials that use an Azure SDK HTTP pipeline. These options don't apply + // to credential types that authenticate via external tools such as the Azure CLI. azcore.ClientOptions // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. Add @@ -32,8 +34,7 @@ type DefaultAzureCredentialOptions struct { // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool - // TenantID identifies the tenant the Azure CLI should authenticate in. - // Defaults to the CLI's default tenant, which is typically the home tenant of the user logged in to the CLI. + // TenantID sets the default tenant for authentication via the Azure CLI and workload identity. 
TenantID string } @@ -83,11 +84,11 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default creds = append(creds, &defaultCredentialErrorReporter{credType: "EnvironmentCredential", err: err}) } - // workload identity requires values for AZURE_AUTHORITY_HOST, AZURE_CLIENT_ID, AZURE_FEDERATED_TOKEN_FILE, AZURE_TENANT_ID wic, err := NewWorkloadIdentityCredential(&WorkloadIdentityCredentialOptions{ AdditionallyAllowedTenants: additionalTenants, ClientOptions: options.ClientOptions, DisableInstanceDiscovery: options.DisableInstanceDiscovery, + TenantID: options.TenantID, }) if err == nil { creds = append(creds, wic) @@ -95,6 +96,7 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default errorMessages = append(errorMessages, credNameWorkloadIdentity+": "+err.Error()) creds = append(creds, &defaultCredentialErrorReporter{credType: credNameWorkloadIdentity, err: err}) } + o := &ManagedIdentityCredentialOptions{ClientOptions: options.ClientOptions} if ID, ok := os.LookupEnv(azureClientID); ok { o.ID = ClientID(ID) @@ -115,9 +117,8 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureCLI, err: err}) } - err = defaultAzureCredentialConstructorErrorHandler(len(creds), errorMessages) - if err != nil { - return nil, err + if len(errorMessages) > 0 { + log.Writef(EventAuthentication, "NewDefaultAzureCredential failed to initialize some credentials:\n\t%s", strings.Join(errorMessages, "\n\t")) } chain, err := NewChainedTokenCredential(creds, nil) @@ -135,20 +136,6 @@ func (c *DefaultAzureCredential) GetToken(ctx context.Context, opts policy.Token var _ azcore.TokenCredential = (*DefaultAzureCredential)(nil) -func defaultAzureCredentialConstructorErrorHandler(numberOfSuccessfulCredentials int, errorMessages []string) (err error) { - errorMessage := strings.Join(errorMessages, "\n\t") - - if numberOfSuccessfulCredentials == 0 { - return errors.New(errorMessage) - } - - if len(errorMessages) != 0 { - log.Writef(EventAuthentication, "NewDefaultAzureCredential failed to initialize some credentials:\n\t%s", errorMessage) - } - - return nil -} - // defaultCredentialErrorReporter is a substitute for credentials that couldn't be constructed. // Its GetToken method always returns a credentialUnavailableError having the same message as // the error that prevented constructing the credential. This ensures the message is present @@ -185,7 +172,7 @@ func (w *timeoutWrapper) GetToken(ctx context.Context, opts policy.TokenRequestO defer cancel() tk, err = w.mic.GetToken(c, opts) if isAuthFailedDueToContext(err) { - err = newCredentialUnavailableError(credNameManagedIdentity, "managed identity timed out") + err = newCredentialUnavailableError(credNameManagedIdentity, "managed identity timed out. 
See https://aka.ms/azsdk/go/identity/troubleshoot#dac for more information") } else { // some managed identity implementation is available, so don't apply the timeout to future calls w.timeout = 0 diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go index 108e83c43..d245c269a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go @@ -12,7 +12,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" ) const credNameDeviceCode = "DeviceCodeCredential" @@ -74,10 +73,7 @@ type DeviceCodeMessage struct { // If a web browser is available, InteractiveBrowserCredential is more convenient because it // automatically opens a browser to the login page. type DeviceCodeCredential struct { - account public.Account - client publicClient - s *syncer - prompt func(context.Context, DeviceCodeMessage) error + client *publicClient } // NewDeviceCodeCredential creates a DeviceCodeCredential. Pass nil to accept default options. @@ -87,50 +83,24 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC cp = *options } cp.init() - c, err := getPublicClient( - cp.ClientID, cp.TenantID, &cp.ClientOptions, public.WithInstanceDiscovery(!cp.DisableInstanceDiscovery), - ) + msalOpts := publicClientOptions{ + AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants, + ClientOptions: cp.ClientOptions, + DeviceCodePrompt: cp.UserPrompt, + DisableInstanceDiscovery: cp.DisableInstanceDiscovery, + } + c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameDeviceCode, msalOpts) if err != nil { return nil, err } - cred := DeviceCodeCredential{client: c, prompt: cp.UserPrompt} - cred.s = newSyncer(credNameDeviceCode, cp.TenantID, cp.AdditionallyAllowedTenants, cred.requestToken, cred.silentAuth) - return &cred, nil + c.name = credNameDeviceCode + return &DeviceCodeCredential{client: c}, nil } // GetToken requests an access token from Azure Active Directory. It will begin the device code flow and poll until the user completes authentication. // This method is called automatically by Azure SDK clients. 
func (c *DeviceCodeCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.s.GetToken(ctx, opts) -} - -func (c *DeviceCodeCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - dc, err := c.client.AcquireTokenByDeviceCode(ctx, opts.Scopes, public.WithTenantID(opts.TenantID)) - if err != nil { - return azcore.AccessToken{}, err - } - err = c.prompt(ctx, DeviceCodeMessage{ - Message: dc.Result.Message, - UserCode: dc.Result.UserCode, - VerificationURL: dc.Result.VerificationURL, - }) - if err != nil { - return azcore.AccessToken{}, err - } - ar, err := dc.AuthenticationResult(ctx) - if err != nil { - return azcore.AccessToken{}, err - } - c.account = ar.Account - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err -} - -func (c *DeviceCodeCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, - public.WithSilentAccount(c.account), - public.WithTenantID(opts.TenantID), - ) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + return c.client.GetToken(ctx, opts) } var _ azcore.TokenCredential = (*DeviceCodeCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go index 86d8976a4..e1a21e003 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go @@ -11,9 +11,9 @@ import ( "encoding/json" "errors" "fmt" - "io" "net/http" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo" msal "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" ) @@ -57,17 +57,16 @@ func (e *AuthenticationFailedError) Error() string { fmt.Fprintln(msg, "--------------------------------------------------------------------------------") fmt.Fprintf(msg, "RESPONSE %s\n", e.RawResponse.Status) fmt.Fprintln(msg, "--------------------------------------------------------------------------------") - body, err := io.ReadAll(e.RawResponse.Body) - e.RawResponse.Body.Close() - if err != nil { + body, err := runtime.Payload(e.RawResponse) + switch { + case err != nil: fmt.Fprintf(msg, "Error reading response body: %v", err) - } else if len(body) > 0 { - e.RawResponse.Body = io.NopCloser(bytes.NewReader(body)) + case len(body) > 0: if err := json.Indent(msg, body, "", " "); err != nil { // failed to pretty-print so just dump it verbatim fmt.Fprint(msg, string(body)) } - } else { + default: fmt.Fprint(msg, "Response contained no body") } fmt.Fprintln(msg, "\n--------------------------------------------------------------------------------") diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go index 4868d22c3..08f3efbf3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -11,7 +11,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" ) const 
credNameBrowser = "InteractiveBrowserCredential" @@ -56,10 +55,7 @@ func (o *InteractiveBrowserCredentialOptions) init() { // InteractiveBrowserCredential opens a browser to interactively authenticate a user. type InteractiveBrowserCredential struct { - account public.Account - client publicClient - options InteractiveBrowserCredentialOptions - s *syncer + client *publicClient } // NewInteractiveBrowserCredential constructs a new InteractiveBrowserCredential. Pass nil to accept default options. @@ -69,38 +65,22 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption cp = *options } cp.init() - c, err := getPublicClient(cp.ClientID, cp.TenantID, &cp.ClientOptions, public.WithInstanceDiscovery(!cp.DisableInstanceDiscovery)) + msalOpts := publicClientOptions{ + ClientOptions: cp.ClientOptions, + DisableInstanceDiscovery: cp.DisableInstanceDiscovery, + LoginHint: cp.LoginHint, + RedirectURL: cp.RedirectURL, + } + c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameBrowser, msalOpts) if err != nil { return nil, err } - ibc := InteractiveBrowserCredential{client: c, options: cp} - ibc.s = newSyncer(credNameBrowser, cp.TenantID, cp.AdditionallyAllowedTenants, ibc.requestToken, ibc.silentAuth) - return &ibc, nil + return &InteractiveBrowserCredential{client: c}, nil } // GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. func (c *InteractiveBrowserCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.s.GetToken(ctx, opts) -} - -func (c *InteractiveBrowserCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenInteractive(ctx, opts.Scopes, - public.WithLoginHint(c.options.LoginHint), - public.WithRedirectURI(c.options.RedirectURL), - public.WithTenantID(opts.TenantID), - ) - if err == nil { - c.account = ar.Account - } - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err -} - -func (c *InteractiveBrowserCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, - public.WithSilentAccount(c.account), - public.WithTenantID(opts.TenantID), - ) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + return c.client.GetToken(ctx, opts) } var _ azcore.TokenCredential = (*InteractiveBrowserCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go index d7b4a32a5..fdc3c1f67 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -84,13 +84,15 @@ func setIMDSRetryOptionDefaults(o *policy.RetryOptions) { } if o.StatusCodes == nil { o.StatusCodes = []int{ - // IMDS docs recommend retrying 404, 429 and all 5xx - // https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#error-handling + // IMDS docs recommend retrying 404, 410, 429 and 5xx + // https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token#error-handling http.StatusNotFound, // 404 + http.StatusGone, // 410 http.StatusTooManyRequests, // 429 
http.StatusInternalServerError, // 500 http.StatusNotImplemented, // 501 http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 http.StatusGatewayTimeout, // 504 http.StatusHTTPVersionNotSupported, // 505 http.StatusVariantAlsoNegotiates, // 506 @@ -175,11 +177,25 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi return c.createAccessToken(resp) } - if c.msiType == msiTypeIMDS && resp.StatusCode == 400 { - if id != nil { - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp, nil) + if c.msiType == msiTypeIMDS { + switch resp.StatusCode { + case http.StatusBadRequest: + if id != nil { + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp, nil) + } + msg := "failed to authenticate a system assigned identity" + if body, err := runtime.Payload(resp); err == nil && len(body) > 0 { + msg += fmt.Sprintf(". The endpoint responded with %s", body) + } + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, msg) + case http.StatusForbidden: + // Docker Desktop runs a proxy that responds 403 to IMDS token requests. If we get that response, + // we return credentialUnavailableError so credential chains continue to their next credential + body, err := runtime.Payload(resp) + if err == nil && strings.Contains(string(body), "A socket operation was attempted to an unreachable network") { + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, fmt.Sprintf("unexpected response %q", string(body))) + } } - return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, "no default identity is assigned to this resource") } return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "authentication failed", resp, nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go index c6710ae52..35c5e6725 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go @@ -8,7 +8,6 @@ package azidentity import ( "context" - "errors" "fmt" "strings" @@ -71,9 +70,8 @@ type ManagedIdentityCredentialOptions struct { // user-assigned identity. See Azure Active Directory documentation for more information about managed identities: // https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview type ManagedIdentityCredential struct { - client confidentialClient + client *confidentialClient mic *managedIdentityClient - s *syncer } // NewManagedIdentityCredential creates a ManagedIdentityCredential. Pass nil to accept default options. 
@@ -93,35 +91,23 @@ func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*M if options.ID != nil { clientID = options.ID.String() } - // similarly, it's okay to give MSAL an incorrect authority URL because that URL won't be used - c, err := confidential.New("https://login.microsoftonline.com/common", clientID, cred) + // similarly, it's okay to give MSAL an incorrect tenant because MSAL won't use the value + c, err := newConfidentialClient("common", clientID, credNameManagedIdentity, cred, confidentialClientOptions{}) if err != nil { return nil, err } - m := ManagedIdentityCredential{client: c, mic: mic} - m.s = newSyncer(credNameManagedIdentity, "", nil, m.requestToken, m.silentAuth) - return &m, nil + return &ManagedIdentityCredential{client: c, mic: mic}, nil } // GetToken requests an access token from the hosting environment. This method is called automatically by Azure SDK clients. func (c *ManagedIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { if len(opts.Scopes) != 1 { - err := errors.New(credNameManagedIdentity + ": GetToken() requires exactly one scope") + err := fmt.Errorf("%s.GetToken() requires exactly one scope", credNameManagedIdentity) return azcore.AccessToken{}, err } // managed identity endpoints require an AADv1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} - return c.s.GetToken(ctx, opts) -} - -func (c *ManagedIdentityCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenByCredential(ctx, opts.Scopes) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err -} - -func (c *ManagedIdentityCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + return c.client.GetToken(ctx, opts) } var _ azcore.TokenCredential = (*ManagedIdentityCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go index 3e173f47d..2b360b681 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go @@ -25,9 +25,7 @@ const credNameOBO = "OnBehalfOfCredential" // // [Azure Active Directory documentation]: https://docs.microsoft.com/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow type OnBehalfOfCredential struct { - assertion string - client confidentialClient - s *syncer + client *confidentialClient } // OnBehalfOfCredentialOptions contains optional parameters for OnBehalfOfCredential @@ -72,28 +70,23 @@ func newOnBehalfOfCredential(tenantID, clientID, userAssertion string, cred conf if options == nil { options = &OnBehalfOfCredentialOptions{} } - opts := []confidential.Option{} - if options.SendCertificateChain { - opts = append(opts, confidential.WithX5C()) + opts := confidentialClientOptions{ + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Assertion: userAssertion, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + SendX5C: options.SendCertificateChain, 
} - opts = append(opts, confidential.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) - c, err := getConfidentialClient(clientID, tenantID, cred, &options.ClientOptions, opts...) + c, err := newConfidentialClient(tenantID, clientID, credNameOBO, cred, opts) if err != nil { return nil, err } - obo := OnBehalfOfCredential{assertion: userAssertion, client: c} - obo.s = newSyncer(credNameOBO, tenantID, options.AdditionallyAllowedTenants, obo.requestToken, obo.requestToken) - return &obo, nil + return &OnBehalfOfCredential{c}, nil } // GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. func (o *OnBehalfOfCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return o.s.GetToken(ctx, opts) -} - -func (o *OnBehalfOfCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := o.client.AcquireTokenOnBehalfOf(ctx, o.assertion, opts.Scopes, confidential.WithTenantID(opts.TenantID)) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + return o.client.GetToken(ctx, opts) } var _ azcore.TokenCredential = (*OnBehalfOfCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go new file mode 100644 index 000000000..6512d3e25 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go @@ -0,0 +1,178 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "context" + "fmt" + "strings" + "sync" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" +) + +type publicClientOptions struct { + azcore.ClientOptions + + AdditionallyAllowedTenants []string + DeviceCodePrompt func(context.Context, DeviceCodeMessage) error + DisableInstanceDiscovery bool + LoginHint, RedirectURL string + Username, Password string +} + +// publicClient wraps the MSAL public client +type publicClient struct { + account public.Account + cae, noCAE msalPublicClient + caeMu, noCAEMu, clientMu *sync.Mutex + clientID, tenantID string + host string + name string + opts publicClientOptions +} + +func newPublicClient(tenantID, clientID, name string, o publicClientOptions) (*publicClient, error) { + if !validTenantID(tenantID) { + return nil, errInvalidTenantID + } + host, err := setAuthorityHost(o.Cloud) + if err != nil { + return nil, err + } + o.AdditionallyAllowedTenants = resolveAdditionalTenants(o.AdditionallyAllowedTenants) + return &publicClient{ + caeMu: &sync.Mutex{}, + clientID: clientID, + clientMu: &sync.Mutex{}, + host: host, + name: name, + noCAEMu: &sync.Mutex{}, + opts: o, + tenantID: tenantID, + }, nil +} + +// GetToken requests an access token from MSAL, checking the cache first. 
+func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOptions) (azcore.AccessToken, error) { + if len(tro.Scopes) < 1 { + return azcore.AccessToken{}, fmt.Errorf("%s.GetToken() requires at least one scope", p.name) + } + tenant, err := p.resolveTenant(tro.TenantID) + if err != nil { + return azcore.AccessToken{}, err + } + client, mu, err := p.client(tro) + if err != nil { + return azcore.AccessToken{}, err + } + mu.Lock() + defer mu.Unlock() + ar, err := client.AcquireTokenSilent(ctx, tro.Scopes, public.WithSilentAccount(p.account), public.WithClaims(tro.Claims), public.WithTenantID(tenant)) + if err == nil { + return p.token(ar, err) + } + at, err := p.reqToken(ctx, client, tro) + if err == nil { + msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", p.name, strings.Join(ar.GrantedScopes, ", ")) + log.Write(EventAuthentication, msg) + } + return at, err +} + +// reqToken requests a token from the MSAL public client. It's separate from GetToken() to enable Authenticate() to bypass the cache. +func (p *publicClient) reqToken(ctx context.Context, c msalPublicClient, tro policy.TokenRequestOptions) (azcore.AccessToken, error) { + tenant, err := p.resolveTenant(tro.TenantID) + if err != nil { + return azcore.AccessToken{}, err + } + var ar public.AuthResult + switch p.name { + case credNameBrowser: + ar, err = c.AcquireTokenInteractive(ctx, tro.Scopes, + public.WithClaims(tro.Claims), + public.WithLoginHint(p.opts.LoginHint), + public.WithRedirectURI(p.opts.RedirectURL), + public.WithTenantID(tenant), + ) + case credNameDeviceCode: + dc, e := c.AcquireTokenByDeviceCode(ctx, tro.Scopes, public.WithClaims(tro.Claims), public.WithTenantID(tenant)) + if e != nil { + return azcore.AccessToken{}, e + } + err = p.opts.DeviceCodePrompt(ctx, DeviceCodeMessage{ + Message: dc.Result.Message, + UserCode: dc.Result.UserCode, + VerificationURL: dc.Result.VerificationURL, + }) + if err == nil { + ar, err = dc.AuthenticationResult(ctx) + } + case credNameUserPassword: + ar, err = c.AcquireTokenByUsernamePassword(ctx, tro.Scopes, p.opts.Username, p.opts.Password, public.WithClaims(tro.Claims), public.WithTenantID(tenant)) + default: + return azcore.AccessToken{}, fmt.Errorf("unknown credential %q", p.name) + } + return p.token(ar, err) +} + +func (p *publicClient) client(tro policy.TokenRequestOptions) (msalPublicClient, *sync.Mutex, error) { + p.clientMu.Lock() + defer p.clientMu.Unlock() + if tro.EnableCAE { + if p.cae == nil { + client, err := p.newMSALClient(true) + if err != nil { + return nil, nil, err + } + p.cae = client + } + return p.cae, p.caeMu, nil + } + if p.noCAE == nil { + client, err := p.newMSALClient(false) + if err != nil { + return nil, nil, err + } + p.noCAE = client + } + return p.noCAE, p.noCAEMu, nil +} + +func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) { + o := []public.Option{ + public.WithAuthority(runtime.JoinPaths(p.host, p.tenantID)), + public.WithHTTPClient(newPipelineAdapter(&p.opts.ClientOptions)), + } + if enableCAE { + o = append(o, public.WithClientCapabilities(cp1)) + } + if p.opts.DisableInstanceDiscovery || strings.ToLower(p.tenantID) == "adfs" { + o = append(o, public.WithInstanceDiscovery(false)) + } + return public.New(p.clientID, o...) 
+} + +func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToken, error) { + if err == nil { + p.account = ar.Account + } else { + res := getResponseFromError(err) + err = newAuthenticationFailedError(p.name, err.Error(), res, err) + } + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err +} + +// resolveTenant returns the correct tenant for a token request given the client's +// configuration, or an error when that configuration doesn't allow the specified tenant +func (p *publicClient) resolveTenant(specified string) (string, error) { + return resolveTenant(p.tenantID, specified, p.name, p.opts.AdditionallyAllowedTenants) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/syncer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/syncer.go deleted file mode 100644 index ae3855599..000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/syncer.go +++ /dev/null @@ -1,130 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package azidentity - -import ( - "context" - "errors" - "fmt" - "strings" - "sync" - - "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/internal/log" -) - -type authFn func(context.Context, policy.TokenRequestOptions) (azcore.AccessToken, error) - -// syncer synchronizes authentication calls so that goroutines can share a credential instance -type syncer struct { - addlTenants []string - authing bool - cond *sync.Cond - reqToken, silent authFn - name, tenant string -} - -func newSyncer(name, tenant string, additionalTenants []string, reqToken, silentAuth authFn) *syncer { - return &syncer{ - addlTenants: resolveAdditionalTenants(additionalTenants), - cond: &sync.Cond{L: &sync.Mutex{}}, - name: name, - reqToken: reqToken, - silent: silentAuth, - tenant: tenant, - } -} - -// GetToken ensures that only one goroutine authenticates at a time -func (s *syncer) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - var at azcore.AccessToken - var err error - if len(opts.Scopes) == 0 { - return at, errors.New(s.name + ".GetToken() requires at least one scope") - } - // we don't resolve the tenant for managed identities because they can acquire tokens only from their home tenants - if s.name != credNameManagedIdentity { - tenant, err := s.resolveTenant(opts.TenantID) - if err != nil { - return at, err - } - opts.TenantID = tenant - } - auth := false - s.cond.L.Lock() - defer s.cond.L.Unlock() - for { - at, err = s.silent(ctx, opts) - if err == nil { - // got a token - break - } - if !s.authing { - // this goroutine will request a token - s.authing, auth = true, true - break - } - // another goroutine is acquiring a token; wait for it to finish, then try silent auth again - s.cond.Wait() - } - if auth { - s.authing = false - at, err = s.reqToken(ctx, opts) - s.cond.Broadcast() - } - if err != nil { - // Return credentialUnavailableError directly because that type affects the behavior of credential chains. - // Otherwise, return AuthenticationFailedError. 
- var unavailableErr *credentialUnavailableError - if !errors.As(err, &unavailableErr) { - res := getResponseFromError(err) - err = newAuthenticationFailedError(s.name, err.Error(), res, err) - } - } else if log.Should(EventAuthentication) { - scope := strings.Join(opts.Scopes, ", ") - msg := fmt.Sprintf(`%s.GetToken() acquired a token for scope "%s"\n`, s.name, scope) - log.Write(EventAuthentication, msg) - } - return at, err -} - -// resolveTenant returns the correct tenant for a token request given the credential's -// configuration, or an error when the specified tenant isn't allowed by that configuration -func (s *syncer) resolveTenant(requested string) (string, error) { - if requested == "" || requested == s.tenant { - return s.tenant, nil - } - if s.tenant == "adfs" { - return "", errors.New("ADFS doesn't support tenants") - } - if !validTenantID(requested) { - return "", errors.New(tenantIDValidationErr) - } - for _, t := range s.addlTenants { - if t == "*" || t == requested { - return requested, nil - } - } - return "", fmt.Errorf(`%s isn't configured to acquire tokens for tenant %q. To enable acquiring tokens for this tenant add it to the AdditionallyAllowedTenants on the credential options, or add "*" to allow acquiring tokens for any tenant`, s.name, requested) -} - -// resolveAdditionalTenants returns a copy of tenants, simplified when tenants contains a wildcard -func resolveAdditionalTenants(tenants []string) []string { - if len(tenants) == 0 { - return nil - } - for _, t := range tenants { - // a wildcard makes all other values redundant - if t == "*" { - return []string{"*"} - } - } - cp := make([]string, len(tenants)) - copy(cp, tenants) - return cp -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 new file mode 100644 index 000000000..fe0183add --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-pre.ps1 @@ -0,0 +1,36 @@ +[CmdletBinding(SupportsShouldProcess = $true, ConfirmImpact = 'Medium')] +param ( + # Captures any arguments from eng/New-TestResources.ps1 not declared here (no parameter errors). + [Parameter(ValueFromRemainingArguments = $true)] + $RemainingArguments +) + +if (!$CI) { + # TODO: Remove this once auto-cloud config downloads are supported locally + Write-Host "Skipping cert setup in local testing mode" + return +} + +if ($EnvironmentVariables -eq $null -or $EnvironmentVariables.Count -eq 0) { + throw "EnvironmentVariables must be set in the calling script New-TestResources.ps1" +} + +$tmp = $env:TEMP ? 
$env:TEMP : [System.IO.Path]::GetTempPath() +$pfxPath = Join-Path $tmp "test.pfx" +$pemPath = Join-Path $tmp "test.pem" +$sniPath = Join-Path $tmp "testsni.pfx" + +Write-Host "Creating identity test files: $pfxPath $pemPath $sniPath" + +[System.Convert]::FromBase64String($EnvironmentVariables['PFX_CONTENTS']) | Set-Content -Path $pfxPath -AsByteStream +Set-Content -Path $pemPath -Value $EnvironmentVariables['PEM_CONTENTS'] +[System.Convert]::FromBase64String($EnvironmentVariables['SNI_CONTENTS']) | Set-Content -Path $sniPath -AsByteStream + +# Set for pipeline +Write-Host "##vso[task.setvariable variable=IDENTITY_SP_CERT_PFX;]$pfxPath" +Write-Host "##vso[task.setvariable variable=IDENTITY_SP_CERT_PEM;]$pemPath" +Write-Host "##vso[task.setvariable variable=IDENTITY_SP_CERT_SNI;]$sniPath" +# Set for local +$env:IDENTITY_SP_CERT_PFX = $pfxPath +$env:IDENTITY_SP_CERT_PEM = $pemPath +$env:IDENTITY_SP_CERT_SNI = $sniPath diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep new file mode 100644 index 000000000..b3490d3b5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep @@ -0,0 +1 @@ +param baseName string diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go index 8e652e33f..f787ec0ce 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go @@ -11,7 +11,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" ) const credNameUserPassword = "UsernamePasswordCredential" @@ -36,10 +35,7 @@ type UsernamePasswordCredentialOptions struct { // with any form of multi-factor authentication, and the application must already have user or admin consent. // This credential can only authenticate work and school accounts; it can't authenticate Microsoft accounts. type UsernamePasswordCredential struct { - account public.Account - client publicClient - password, username string - s *syncer + client *publicClient } // NewUsernamePasswordCredential creates a UsernamePasswordCredential. clientID is the ID of the application the user @@ -48,34 +44,23 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st if options == nil { options = &UsernamePasswordCredentialOptions{} } - c, err := getPublicClient(clientID, tenantID, &options.ClientOptions, public.WithInstanceDiscovery(!options.DisableInstanceDiscovery)) + opts := publicClientOptions{ + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + Password: password, + Username: username, + } + c, err := newPublicClient(tenantID, clientID, credNameUserPassword, opts) if err != nil { return nil, err } - upc := UsernamePasswordCredential{client: c, password: password, username: username} - upc.s = newSyncer(credNameUserPassword, tenantID, options.AdditionallyAllowedTenants, upc.requestToken, upc.silentAuth) - return &upc, nil + return &UsernamePasswordCredential{client: c}, err } // GetToken requests an access token from Azure Active Directory. 
This method is called automatically by Azure SDK clients. func (c *UsernamePasswordCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.s.GetToken(ctx, opts) -} - -func (c *UsernamePasswordCredential) requestToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenByUsernamePassword(ctx, opts.Scopes, c.username, c.password, public.WithTenantID(opts.TenantID)) - if err == nil { - c.account = ar.Account - } - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err -} - -func (c *UsernamePasswordCredential) silentAuth(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - ar, err := c.client.AcquireTokenSilent(ctx, opts.Scopes, - public.WithSilentAccount(c.account), - public.WithTenantID(opts.TenantID), - ) - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + return c.client.GetToken(ctx, opts) } var _ azcore.TokenCredential = (*UsernamePasswordCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go index 38a1f420b..65e74e31e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -11,5 +11,5 @@ const ( component = "azidentity" // Version is the semantic version (see http://semver.org) of this module. - version = "v1.3.1" + version = "v1.4.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go index 7bfb34367..7e016324d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go @@ -47,7 +47,7 @@ type WorkloadIdentityCredentialOptions struct { DisableInstanceDiscovery bool // TenantID of the service principal. Defaults to the value of the environment variable AZURE_TENANT_ID. TenantID string - // TokenFilePath is the path a file containing the workload identity token. Defaults to the value of the + // TokenFilePath is the path of a file containing a Kubernetes service account token. Defaults to the value of the // environment variable AZURE_FEDERATED_TOKEN_FILE. TokenFilePath string } @@ -88,7 +88,7 @@ func NewWorkloadIdentityCredential(options *WorkloadIdentityCredentialOptions) ( return nil, err } // we want "WorkloadIdentityCredential" in log messages, not "ClientAssertionCredential" - cred.s.name = credNameWorkloadIdentity + cred.client.name = credNameWorkloadIdentity w.cred = cred return &w, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go index ade7b348e..8ee66b526 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo/errorinfo.go @@ -14,3 +14,33 @@ type NonRetriable interface { error NonRetriable() } + +// NonRetriableError marks the specified error as non-retriable. +// This function takes an error as input and returns a new error that is marked as non-retriable. 
+func NonRetriableError(err error) error { + return &nonRetriableError{err} +} + +// nonRetriableError is a struct that embeds the error interface. +// It is used to represent errors that should not be retried. +type nonRetriableError struct { + error +} + +// Error method for nonRetriableError struct. +// It returns the error message of the embedded error. +func (p *nonRetriableError) Error() string { + return p.error.Error() +} + +// NonRetriable is a marker method for nonRetriableError struct. +// Non-functional and indicates that the error is non-retriable. +func (*nonRetriableError) NonRetriable() { + // marker method +} + +// Unwrap method for nonRetriableError struct. +// It returns the original error that was marked as non-retriable. +func (p *nonRetriableError) Unwrap() error { + return p.error +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md index 169e46972..6e5e80087 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md @@ -1,5 +1,29 @@ # Release History +## 1.2.0 (2023-10-11) + +### Bugs Fixed +* Fixed null pointer exception when `SetImmutabilityPolicyOptions` is passed as `nil`. + +## 1.2.0-beta.1 (2023-09-18) + +### Features Added +* Added support for service version 2020-12-06, 2021-02-12, 2021-04-10, 2021-06-08, 2021-08-06 , 2021-10-04, 2021-12-02, 2022-11-02, 2023-01-03, 2023-05-03, and 2023-08-03 +* Added support for [Cold Tier](https://learn.microsoft.com/azure/storage/blobs/access-tiers-overview?tabs=azure-portal). +* Added `CopySourceTag` option for `UploadBlobFromURLOptions` +* Added [FilterBlobs by Tags](https://learn.microsoft.com/rest/api/storageservices/find-blobs-by-tags-container) API for container client. +* Added `System` option to `ListContainersInclude` to allow listing of system containers (i.e, $web). +* Updated the SAS Version to `2021-12-02` and added `Encryption Scope` to Account SAS, Service SAS, and User Delegation SAS +* Added `ArchiveStatusRehydratePendingToCold` value to `ArchiveStatus` enum. +* Content length limit for `AppendBlob.AppendBlock()` and `AppendBlob.AppendBlockFromURL()` raised from 4 MB to 100 MB. + +### Bugs Fixed +* Fixed issue where some requests fail with mismatch in string to sign. +* Fixed service SAS creation where expiry time or permissions can be omitted when stored access policy is used. Fixes [#21229](https://github.com/Azure/azure-sdk-for-go/issues/21229). + +### Other Changes +* Updating version of azcore to 1.6.0. + ## 1.1.0 (2023-07-13) ### Features Added @@ -15,7 +39,6 @@ * Fixed time formatting for the conditional request headers. Fixes [#20475](https://github.com/Azure/azure-sdk-for-go/issues/20475). * Fixed an issue where passing a blob tags map of length 0 would result in the x-ms-tags header to be sent to the service with an empty string as value. - * Fixed block size and number of blocks calculation in `UploadBuffer` and `UploadFile`. Fixes [#20735](https://github.com/Azure/azure-sdk-for-go/issues/20735). 
### Other Changes diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md index 2759340f5..905fb2675 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md @@ -1,6 +1,6 @@ # Azure Blob Storage module for Go -> Service Version: 2020-10-02 +> Service Version: 2023-08-03 Azure Blob Storage is Microsoft's object storage solution for the cloud. Blob Storage is optimized for storing massive amounts of unstructured data - data that does not adhere to a particular data model or diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go index fc96b67de..69913e334 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go @@ -9,6 +9,7 @@ package appendblob import ( "context" "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "io" "os" "time" @@ -35,12 +36,14 @@ type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClien func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { authPolicy := shared.NewStorageChallengePolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, - exported.ModuleVersion, runtime.PipelineOptions{}, - &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewAppendBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(shared.AppendBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + return (*Client)(base.NewAppendBlobClient(blobURL, azClient, nil)), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -49,12 +52,13 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, - exported.ModuleVersion, - runtime.PipelineOptions{}, - &conOptions.ClientOptions) - return (*Client)(base.NewAppendBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(shared.AppendBlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + return (*Client)(base.NewAppendBlobClient(blobURL, azClient, nil)), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. 
@@ -64,13 +68,14 @@ func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) { authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, - exported.ModuleVersion, - runtime.PipelineOptions{}, - &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewAppendBlobClient(blobURL, pl, cred)), nil + azClient, err := azcore.NewClient(shared.AppendBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + return (*Client)(base.NewAppendBlobClient(blobURL, azClient, cred)), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. @@ -130,7 +135,7 @@ func (ab *Client) WithSnapshot(snapshot string) (*Client, error) { } p.Snapshot = snapshot - return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().Pipeline(), ab.sharedKey())), nil + return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().InternalClient(), ab.sharedKey())), nil } // WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. @@ -142,7 +147,7 @@ func (ab *Client) WithVersionID(versionID string) (*Client, error) { } p.VersionID = versionID - return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().Pipeline(), ab.sharedKey())), nil + return (*Client)(base.NewAppendBlobClient(p.String(), ab.generated().InternalClient(), ab.sharedKey())), nil } // Create creates a 0-size append blob. Call AppendBlock to append data to an append blob. 
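Editorial note: the appendblob constructors above now build their pipelines via `azcore.NewClient`, passing the auth policy through `runtime.PipelineOptions.PerRetry` instead of appending to `conOptions.PerRetryPolicies`; the caller-facing API is unchanged. A minimal, hedged usage sketch (the blob URL is a placeholder, and the snippet assumes the `azidentity` module updated earlier in this patch is available):

```go
package main

import (
	"context"
	"log"
	"strings"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob"
)

func main() {
	// DefaultAzureCredential resolves environment, workload identity, CLI, etc.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder URL; substitute a real account/container/blob.
	client, err := appendblob.NewClient(
		"https://<account>.blob.core.windows.net/<container>/<blob>", cred, nil)
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()
	// Create a 0-size append blob, then append a block to it.
	if _, err = client.Create(ctx, nil); err != nil {
		log.Fatal(err)
	}
	if _, err = client.AppendBlock(ctx,
		streaming.NopCloser(strings.NewReader("hello")), nil); err != nil {
		log.Fatal(err)
	}
}
```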
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json index 4db1d7209..ee07ad45b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/storage/azblob", - "Tag": "go/storage/azblob_a772b9c866" + "Tag": "go/storage/azblob_818d8addd0" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go index 98c166aaf..55de9b349 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go @@ -8,15 +8,16 @@ package blob import ( "context" - "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "io" "os" "sync" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" @@ -37,10 +38,13 @@ type Client base.Client[generated.BlobClient] func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { authPolicy := shared.NewStorageChallengePolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewBlobClient(blobURL, pl, &cred)), nil + azClient, err := azcore.NewClient(shared.BlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewBlobClient(blobURL, azClient, &cred)), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -49,9 +53,12 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - return (*Client)(base.NewBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(shared.BlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewBlobClient(blobURL, azClient, nil)), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. 
@@ -61,10 +68,13 @@ func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, func NewClientWithSharedKeyCredential(blobURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewBlobClient(blobURL, pl, cred)), nil + azClient, err := azcore.NewClient(shared.BlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewBlobClient(blobURL, azClient, cred)), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. @@ -116,7 +126,7 @@ func (b *Client) WithSnapshot(snapshot string) (*Client, error) { } p.Snapshot = snapshot - return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.credential())), nil + return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential())), nil } // WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. @@ -128,7 +138,7 @@ func (b *Client) WithVersionID(versionID string) (*Client, error) { } p.VersionID = versionID - return (*Client)(base.NewBlobClient(p.String(), b.generated().Pipeline(), b.credential())), nil + return (*Client)(base.NewBlobClient(p.String(), b.generated().InternalClient(), b.credential())), nil } // Delete marks the specified blob or snapshot for deletion. The blob is later deleted during garbage collection. @@ -261,8 +271,8 @@ func (b *Client) SetLegalHold(ctx context.Context, legalHold bool, options *SetL // CopyFromURL synchronously copies the data at the source URL to a block blob, with sizes up to 256 MB. // For more information, see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url. func (b *Client) CopyFromURL(ctx context.Context, copySource string, options *CopyFromURLOptions) (CopyFromURLResponse, error) { - copyOptions, smac, mac, lac := options.format() - resp, err := b.generated().CopyFromURL(ctx, copySource, copyOptions, smac, mac, lac) + copyOptions, smac, mac, lac, cpkScopeInfo := options.format() + resp, err := b.generated().CopyFromURL(ctx, copySource, copyOptions, smac, mac, lac, cpkScopeInfo) return resp, err } @@ -314,8 +324,8 @@ func (b *Client) GetSASURL(permissions sas.BlobPermissions, expiry time.Time, o // Concurrent Download Functions ----------------------------------------------------------------------------------------- -// download downloads an Azure blob to a WriterAt in parallel. -func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOptions) (int64, error) { +// downloadBuffer downloads an Azure blob to a WriterAt in parallel. 
+func (b *Client) downloadBuffer(ctx context.Context, writer io.WriterAt, o downloadOptions) (int64, error) { if o.BlockSize == 0 { o.BlockSize = DefaultDownloadBlockSize } @@ -343,6 +353,7 @@ func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOpt OperationName: "downloadBlobToWriterAt", TransferSize: count, ChunkSize: o.BlockSize, + NumChunks: uint16(((count - 1) / o.BlockSize) + 1), Concurrency: o.Concurrency, Operation: func(ctx context.Context, chunkStart int64, count int64) error { downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{ @@ -381,6 +392,168 @@ func (b *Client) download(ctx context.Context, writer io.WriterAt, o downloadOpt return count, nil } +// downloadFile downloads an Azure blob to a Writer. The blocks are downloaded parallely, +// but written to file serially +func (b *Client) downloadFile(ctx context.Context, writer io.Writer, o downloadOptions) (int64, error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + if o.BlockSize == 0 { + o.BlockSize = DefaultDownloadBlockSize + } + + if o.Concurrency == 0 { + o.Concurrency = DefaultConcurrency + } + + count := o.Range.Count + if count == CountToEnd { //Calculate size if not specified + gr, err := b.GetProperties(ctx, o.getBlobPropertiesOptions()) + if err != nil { + return 0, err + } + count = *gr.ContentLength - o.Range.Offset + } + + if count <= 0 { + // The file is empty, there is nothing to download. + return 0, nil + } + + progress := int64(0) + progressLock := &sync.Mutex{} + + // helper routine to get body + getBodyForRange := func(ctx context.Context, chunkStart, size int64) (io.ReadCloser, error) { + downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{ + Offset: chunkStart + o.Range.Offset, + Count: size, + }, nil) + dr, err := b.DownloadStream(ctx, downloadBlobOptions) + if err != nil { + return nil, err + } + + var body io.ReadCloser = dr.NewRetryReader(ctx, &o.RetryReaderOptionsPerBlock) + if o.Progress != nil { + rangeProgress := int64(0) + body = streaming.NewResponseProgress( + body, + func(bytesTransferred int64) { + diff := bytesTransferred - rangeProgress + rangeProgress = bytesTransferred + progressLock.Lock() + progress += diff + o.Progress(progress) + progressLock.Unlock() + }) + } + + return body, nil + } + + // if file fits in a single buffer, we'll download here. + if count <= o.BlockSize { + body, err := getBodyForRange(ctx, int64(0), count) + if err != nil { + return 0, err + } + defer body.Close() + + return io.Copy(writer, body) + } + + buffers := shared.NewMMBPool(int(o.Concurrency), o.BlockSize) + defer buffers.Free() + aquireBuffer := func() ([]byte, error) { + select { + case b := <-buffers.Acquire(): + // got a buffer + return b, nil + default: + // no buffer available; allocate a new buffer if possible + if _, err := buffers.Grow(); err != nil { + return nil, err + } + + // either grab the newly allocated buffer or wait for one to become available + return <-buffers.Acquire(), nil + } + } + + numChunks := uint16((count-1)/o.BlockSize) + 1 + blocks := make([]chan []byte, numChunks) + for b := range blocks { + blocks[b] = make(chan []byte) + } + + /* + * We have created as many channels as the number of chunks we have. + * Each downloaded block will be sent to the channel matching its + * sequece number, i.e. 0th block is sent to 0th channel, 1st block + * to 1st channel and likewise. The blocks are then read and written + * to the file serially by below goroutine. 
Do note that the blocks + * blocks are still downloaded parallelly from n/w, only serailized + * and written to file here. + */ + writerError := make(chan error) + go func(ch chan error) { + for _, block := range blocks { + select { + case <-ctx.Done(): + return + case block := <-block: + _, err := writer.Write(block) + buffers.Release(block) + if err != nil { + ch <- err + return + } + } + } + ch <- nil + }(writerError) + + // Prepare and do parallel download. + err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{ + OperationName: "downloadBlobToWriterAt", + TransferSize: count, + ChunkSize: o.BlockSize, + NumChunks: numChunks, + Concurrency: o.Concurrency, + Operation: func(ctx context.Context, chunkStart int64, count int64) error { + buff, err := aquireBuffer() + if err != nil { + return err + } + + body, err := getBodyForRange(ctx, chunkStart, count) + if err != nil { + buffers.Release(buff) + return nil + } + + _, err = io.ReadFull(body, buff[:count]) + body.Close() + if err != nil { + return err + } + + blockIndex := (chunkStart / o.BlockSize) + blocks[blockIndex] <- buff + return nil + }, + }) + + if err != nil { + return 0, err + } + // error from writer thread. + if err = <-writerError; err != nil { + return 0, err + } + return count, nil +} + // DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata. // For more information, see https://docs.microsoft.com/rest/api/storageservices/get-blob. func (b *Client) DownloadStream(ctx context.Context, o *DownloadStreamOptions) (DownloadStreamResponse, error) { @@ -409,7 +582,7 @@ func (b *Client) DownloadBuffer(ctx context.Context, buffer []byte, o *DownloadB if o == nil { o = &DownloadBufferOptions{} } - return b.download(ctx, shared.NewBytesWriter(buffer), (downloadOptions)(*o)) + return b.downloadBuffer(ctx, shared.NewBytesWriter(buffer), (downloadOptions)(*o)) } // DownloadFile downloads an Azure blob to a local file. 
@@ -448,7 +621,7 @@ func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFil } if size > 0 { - return b.download(ctx, file, *do) + return b.downloadFile(ctx, file, *do) } else { // if the blob's size is 0, there is no need in downloading it return 0, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go index c15635448..daef800ed 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/constants.go @@ -9,6 +9,7 @@ package blob import ( "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" ) const ( @@ -18,6 +19,9 @@ const ( // DefaultDownloadBlockSize is default block size DefaultDownloadBlockSize = int64(4 * 1024 * 1024) // 4MB + + // DefaultConcurrency is the default number of blocks downloaded or uploaded in parallel + DefaultConcurrency = shared.DefaultConcurrency ) // BlobType defines values for BlobType @@ -53,6 +57,7 @@ type AccessTier = generated.AccessTier const ( AccessTierArchive AccessTier = generated.AccessTierArchive AccessTierCool AccessTier = generated.AccessTierCool + AccessTierCold AccessTier = generated.AccessTierCold AccessTierHot AccessTier = generated.AccessTierHot AccessTierP10 AccessTier = generated.AccessTierP10 AccessTierP15 AccessTier = generated.AccessTierP15 @@ -148,6 +153,7 @@ type ArchiveStatus = generated.ArchiveStatus const ( ArchiveStatusRehydratePendingToCool ArchiveStatus = generated.ArchiveStatusRehydratePendingToCool ArchiveStatusRehydratePendingToHot ArchiveStatus = generated.ArchiveStatusRehydratePendingToHot + ArchiveStatusRehydratePendingToCold ArchiveStatus = generated.ArchiveStatusRehydratePendingToCold ) // PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. 
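Editorial note: the new `downloadFile` path above fetches blocks concurrently and funnels each one through a per-chunk channel so a single goroutine writes them to the file in order, and the `blob` package now exports `DefaultConcurrency` and `AccessTierCold`. A minimal sketch of driving this from the caller side (the SAS URL is a placeholder):

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

func main() {
	// No-credential client against a blob readable via SAS; placeholder URL.
	client, err := blob.NewClientWithNoCredential(
		"https://<account>.blob.core.windows.net/<container>/<blob>?<sas>", nil)
	if err != nil {
		log.Fatal(err)
	}

	f, err := os.Create("blob.bin")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Blocks of BlockSize bytes are downloaded by Concurrency workers in parallel
	// and written to the file serially by the goroutine added in this patch.
	n, err := client.DownloadFile(context.Background(), f, &blob.DownloadFileOptions{
		BlockSize:   blob.DefaultDownloadBlockSize,
		Concurrency: uint16(blob.DefaultConcurrency),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("downloaded %d bytes", n)
}
```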
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go index fea2f1ec5..5a79c12d4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/models.go @@ -458,7 +458,7 @@ type SetImmutabilityPolicyOptions struct { func (o *SetImmutabilityPolicyOptions) format() (*generated.BlobClientSetImmutabilityPolicyOptions, *ModifiedAccessConditions) { if o == nil { - return nil, nil + return &generated.BlobClientSetImmutabilityPolicyOptions{}, nil } ac := &exported.BlobAccessConditions{ ModifiedAccessConditions: o.ModifiedAccessConditions, @@ -544,11 +544,13 @@ type CopyFromURLOptions struct { SourceModifiedAccessConditions *SourceModifiedAccessConditions BlobAccessConditions *AccessConditions + + CPKScopeInfo *CPKScopeInfo } -func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions, *generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions) { +func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions, *generated.SourceModifiedAccessConditions, *generated.ModifiedAccessConditions, *generated.LeaseAccessConditions, *generated.CPKScopeInfo) { if o == nil { - return nil, nil, nil, nil + return nil, nil, nil, nil, nil } options := &generated.BlobClientCopyFromURLOptions{ @@ -563,7 +565,7 @@ func (o *CopyFromURLOptions) format() (*generated.BlobClientCopyFromURLOptions, } leaseAccessConditions, modifiedAccessConditions := exported.FormatBlobAccessConditions(o.BlobAccessConditions) - return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions + return options, o.SourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions, o.CPKScopeInfo } // --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go index ad653c1c4..8a1573c0c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror/error_codes.go @@ -153,4 +153,5 @@ const ( var ( // MissingSharedKeyCredential - Error is returned when SAS URL is being created without SharedKeyCredential. MissingSharedKeyCredential = errors.New("SAS can only be signed with a SharedKeyCredential") + UnsupportedChecksum = errors.New("for multi-part uploads, user generated checksums cannot be validated") ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go index 340d4bc76..212255d4c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/chunkwriting.go @@ -18,6 +18,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/internal/uuid" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared" ) // blockWriter provides methods to upload blocks that represent a file to a server and commit them. 
@@ -28,27 +29,8 @@ type blockWriter interface { CommitBlockList(context.Context, []string, *CommitBlockListOptions) (CommitBlockListResponse, error) } -// bufferManager provides an abstraction for the management of buffers. -// this is mostly for testing purposes, but does allow for different implementations without changing the algorithm. -type bufferManager[T ~[]byte] interface { - // Acquire returns the channel that contains the pool of buffers. - Acquire() <-chan T - - // Release releases the buffer back to the pool for reuse/cleanup. - Release(T) - - // Grow grows the number of buffers, up to the predefined max. - // It returns the total number of buffers or an error. - // No error is returned if the number of buffers has reached max. - // This is called only from the reading goroutine. - Grow() (int, error) - - // Free cleans up all buffers. - Free() -} - // copyFromReader copies a source io.Reader to blob storage using concurrent uploads. -func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWriter, options UploadStreamOptions, getBufferManager func(maxBuffers int, bufferSize int64) bufferManager[T]) (CommitBlockListResponse, error) { +func copyFromReader[T ~[]byte](ctx context.Context, src io.Reader, dst blockWriter, options UploadStreamOptions, getBufferManager func(maxBuffers int, bufferSize int64) shared.BufferManager[T]) (CommitBlockListResponse, error) { options.setDefaults() wg := sync.WaitGroup{} // Used to know when all outgoing blocks have finished processing @@ -265,49 +247,3 @@ func (ubi uuidBlockID) WithBlockNumber(blockNumber uint32) uuidBlockID { func (ubi uuidBlockID) ToBase64() string { return blockID(ubi).ToBase64() } - -// mmbPool implements the bufferManager interface. -// it uses anonymous memory mapped files for buffers. -// don't use this type directly, use newMMBPool() instead. 
-type mmbPool struct { - buffers chan mmb - count int - max int - size int64 -} - -func newMMBPool(maxBuffers int, bufferSize int64) bufferManager[mmb] { - return &mmbPool{ - buffers: make(chan mmb, maxBuffers), - max: maxBuffers, - size: bufferSize, - } -} - -func (pool *mmbPool) Acquire() <-chan mmb { - return pool.buffers -} - -func (pool *mmbPool) Grow() (int, error) { - if pool.count < pool.max { - buffer, err := newMMB(pool.size) - if err != nil { - return 0, err - } - pool.buffers <- buffer - pool.count++ - } - return pool.count, nil -} - -func (pool *mmbPool) Release(buffer mmb) { - pool.buffers <- buffer -} - -func (pool *mmbPool) Free() { - for i := 0; i < pool.count; i++ { - buffer := <-pool.buffers - buffer.delete() - } - pool.count = 0 -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go index d7e5aa0dc..8b542f85a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go @@ -11,9 +11,12 @@ import ( "context" "encoding/base64" "errors" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror" "io" "math" "os" + "reflect" "sync" "time" @@ -43,10 +46,13 @@ type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { authPolicy := shared.NewStorageChallengePolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewBlockBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(shared.BlockBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewBlockBlobClient(blobURL, azClient, nil)), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -55,9 +61,13 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - return (*Client)(base.NewBlockBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(shared.BlockBlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + return (*Client)(base.NewBlockBlobClient(blobURL, azClient, nil)), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. 
@@ -67,10 +77,14 @@ func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) { authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewBlockBlobClient(blobURL, pl, cred)), nil + azClient, err := azcore.NewClient(shared.BlockBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + return (*Client)(base.NewBlockBlobClient(blobURL, azClient, cred)), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. @@ -130,7 +144,7 @@ func (bb *Client) WithSnapshot(snapshot string) (*Client, error) { } p.Snapshot = snapshot - return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Pipeline(), bb.sharedKey())), nil + return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Internal(), bb.sharedKey())), nil } // WithVersionID creates a new AppendBlobURL object identical to the source but with the specified version id. @@ -142,7 +156,7 @@ func (bb *Client) WithVersionID(versionID string) (*Client, error) { } p.VersionID = versionID - return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Pipeline(), bb.sharedKey())), nil + return (*Client)(base.NewBlockBlobClient(p.String(), bb.generated().Internal(), bb.sharedKey())), nil } // Upload creates a new block blob or overwrites an existing block blob. @@ -160,6 +174,13 @@ func (bb *Client) Upload(ctx context.Context, body io.ReadSeekCloser, options *U opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions := options.format() + if options != nil && options.TransactionalValidation != nil { + body, err = options.TransactionalValidation.Apply(body, opts) + if err != nil { + return UploadResponse{}, err + } + } + resp, err := bb.generated().Upload(ctx, count, body, opts, httpHeaders, leaseInfo, cpkV, cpkN, accessConditions) return resp, err } @@ -248,6 +269,11 @@ func (bb *Client) CommitBlockList(ctx context.Context, base64BlockIDs []string, ImmutabilityPolicyExpiry: options.ImmutabilityPolicyExpiryTime, } + // If user attempts to pass in their own checksum, errors out. + if options.TransactionalContentMD5 != nil || options.TransactionalContentCRC64 != nil { + return CommitBlockListResponse{}, bloberror.UnsupportedChecksum + } + headers = options.HTTPHeaders leaseAccess, modifiedAccess = exported.FormatBlobAccessConditions(options.AccessConditions) cpkInfo = options.CPKInfo @@ -440,6 +466,7 @@ func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actu OperationName: "uploadFromReader", TransferSize: actualSize, ChunkSize: o.BlockSize, + NumChunks: uint16(((actualSize - 1) / o.BlockSize) + 1), Concurrency: o.Concurrency, Operation: func(ctx context.Context, offset int64, chunkSize int64) error { // This function is called once per block. @@ -494,6 +521,12 @@ func (bb *Client) UploadBuffer(ctx context.Context, buffer []byte, o *UploadBuff if o != nil { uploadOptions = *o } + + // If user attempts to pass in their own checksum, errors out. 
+ if uploadOptions.TransactionalValidation != nil && reflect.TypeOf(uploadOptions.TransactionalValidation).Kind() != reflect.Func { + return UploadBufferResponse{}, bloberror.UnsupportedChecksum + } + return bb.uploadFromReader(ctx, bytes.NewReader(buffer), int64(len(buffer)), &uploadOptions) } @@ -507,6 +540,12 @@ func (bb *Client) UploadFile(ctx context.Context, file *os.File, o *UploadFileOp if o != nil { uploadOptions = *o } + + // If user attempts to pass in their own checksum, errors out. + if uploadOptions.TransactionalValidation != nil && reflect.TypeOf(uploadOptions.TransactionalValidation).Kind() != reflect.Func { + return UploadFileResponse{}, bloberror.UnsupportedChecksum + } + return bb.uploadFromReader(ctx, file, stat.Size(), &uploadOptions) } @@ -517,7 +556,12 @@ func (bb *Client) UploadStream(ctx context.Context, body io.Reader, o *UploadStr o = &UploadStreamOptions{} } - result, err := copyFromReader(ctx, body, bb, *o, newMMBPool) + // If user attempts to pass in their own checksum, errors out. + if o.TransactionalValidation != nil && reflect.TypeOf(o.TransactionalValidation).Kind() != reflect.Func { + return UploadStreamResponse{}, bloberror.UnsupportedChecksum + } + + result, err := copyFromReader(ctx, body, bb, *o, shared.NewMMBPool) if err != nil { return CommitBlockListResponse{}, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go index cb1162640..ce3a5d8de 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/constants.go @@ -37,3 +37,16 @@ const ( func PossibleBlockListTypeValues() []BlockListType { return generated.PossibleBlockListTypeValues() } + +// BlobCopySourceTags - can be 'COPY' or 'REPLACE' +type BlobCopySourceTags = generated.BlobCopySourceTags + +const ( + BlobCopySourceTagsCopy = generated.BlobCopySourceTagsCOPY + BlobCopySourceTagsReplace = generated.BlobCopySourceTagsREPLACE +) + +// PossibleBlobCopySourceTagsValues returns the possible values for the BlobCopySourceTags const type. +func PossibleBlobCopySourceTagsValues() []BlobCopySourceTags { + return generated.PossibleBlobCopySourceTagsValues() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go index ba1b9ee9f..453d569e5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/models.go @@ -36,8 +36,9 @@ type UploadOptions struct { // Optional. Indicates the tier to be set on the blob. Tier *blob.AccessTier - // Specify the transactional md5 for the body, to be validated by the service. - TransactionalContentMD5 []byte + // TransactionalValidation specifies the transfer validation type to use. + // The default is nil (no transfer validation). 
+ TransactionalValidation blob.TransferValidationType HTTPHeaders *blob.HTTPHeaders CPKInfo *blob.CPKInfo @@ -46,6 +47,9 @@ type UploadOptions struct { LegalHold *bool ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting ImmutabilityPolicyExpiryTime *time.Time + + // Deprecated: TransactionalContentMD5 can be set by using TransactionalValidation instead + TransactionalContentMD5 []byte } func (o *UploadOptions) format() (*generated.BlockBlobClientUploadOptions, *generated.BlobHTTPHeaders, *generated.LeaseAccessConditions, @@ -81,6 +85,9 @@ type UploadBlobFromURLOptions struct { // Optional, default is true. Indicates if properties from the source blob should be copied. CopySourceBlobProperties *bool + // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. + CopySourceTags *BlobCopySourceTags + // Optional. Specifies a user-defined name-value pair associated with the blob. Metadata map[string]*string @@ -109,6 +116,7 @@ func (o *UploadBlobFromURLOptions) format() (*generated.BlockBlobClientPutBlobFr BlobTagsString: shared.SerializeBlobTagsToStrPtr(o.Tags), CopySourceAuthorization: o.CopySourceAuthorization, CopySourceBlobProperties: o.CopySourceBlobProperties, + CopySourceTags: o.CopySourceTags, Metadata: o.Metadata, SourceContentMD5: o.SourceContentMD5, Tier: o.Tier, @@ -190,8 +198,6 @@ type CommitBlockListOptions struct { RequestID *string Tier *blob.AccessTier Timeout *int32 - TransactionalContentCRC64 []byte - TransactionalContentMD5 []byte HTTPHeaders *blob.HTTPHeaders CPKInfo *blob.CPKInfo CPKScopeInfo *blob.CPKScopeInfo @@ -199,6 +205,12 @@ type CommitBlockListOptions struct { LegalHold *bool ImmutabilityPolicyMode *blob.ImmutabilityPolicySetting ImmutabilityPolicyExpiryTime *time.Time + + // Deprecated: TransactionalContentCRC64 cannot be generated + TransactionalContentCRC64 []byte + + // Deprecated: TransactionalContentMD5 cannot be generated + TransactionalContentMD5 []byte } // --------------------------------------------------------------------------------------------------------------------- @@ -253,9 +265,10 @@ type uploadFromReaderOptions struct { TransactionalValidation blob.TransferValidationType - // Optional header, Specifies the transactional crc64 for the body, to be validated by the service. + // Deprecated: TransactionalContentCRC64 cannot be generated at block level TransactionalContentCRC64 uint64 - // Specify the transactional md5 for the body, to be validated by the service. 
+ + // Deprecated: TransactionalContentMD5 cannot be generated at block level TransactionalContentMD5 []byte } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go index 2ac5a820a..3058b5d49 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go @@ -44,10 +44,13 @@ type Client base.Client[generated.ContainerClient] func NewClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { authPolicy := shared.NewStorageChallengePolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewContainerClient(containerURL, pl, &cred)), nil + azClient, err := azcore.NewClient(shared.ContainerClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewContainerClient(containerURL, azClient, &cred)), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -56,9 +59,12 @@ func NewClient(containerURL string, cred azcore.TokenCredential, options *Client // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(containerURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - return (*Client)(base.NewContainerClient(containerURL, pl, nil)), nil + azClient, err := azcore.NewClient(shared.ContainerClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewContainerClient(containerURL, azClient, nil)), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. @@ -68,10 +74,13 @@ func NewClientWithNoCredential(containerURL string, options *ClientOptions) (*Cl func NewClientWithSharedKeyCredential(containerURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewContainerClient(containerURL, pl, cred)), nil + azClient, err := azcore.NewClient(shared.ContainerClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewContainerClient(containerURL, azClient, cred)), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. 
@@ -124,7 +133,7 @@ func (c *Client) URL() string { func (c *Client) NewBlobClient(blobName string) *blob.Client { blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().Pipeline(), c.credential())) + return (*blob.Client)(base.NewBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.BlobClient), c.credential())) } // NewAppendBlobClient creates a new appendblob.Client object by concatenating blobName to the end of @@ -133,7 +142,7 @@ func (c *Client) NewBlobClient(blobName string) *blob.Client { func (c *Client) NewAppendBlobClient(blobName string) *appendblob.Client { blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*appendblob.Client)(base.NewAppendBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey())) + return (*appendblob.Client)(base.NewAppendBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.AppendBlobClient), c.sharedKey())) } // NewBlockBlobClient creates a new blockblob.Client object by concatenating blobName to the end of @@ -142,7 +151,7 @@ func (c *Client) NewAppendBlobClient(blobName string) *appendblob.Client { func (c *Client) NewBlockBlobClient(blobName string) *blockblob.Client { blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*blockblob.Client)(base.NewBlockBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey())) + return (*blockblob.Client)(base.NewBlockBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.BlockBlobClient), c.sharedKey())) } // NewPageBlobClient creates a new pageblob.Client object by concatenating blobName to the end of @@ -151,7 +160,7 @@ func (c *Client) NewBlockBlobClient(blobName string) *blockblob.Client { func (c *Client) NewPageBlobClient(blobName string) *pageblob.Client { blobName = url.PathEscape(blobName) blobURL := runtime.JoinPaths(c.URL(), blobName) - return (*pageblob.Client)(base.NewPageBlobClient(blobURL, c.generated().Pipeline(), c.sharedKey())) + return (*pageblob.Client)(base.NewPageBlobClient(blobURL, c.generated().InternalClient().WithClientName(shared.PageBlobClient), c.sharedKey())) } // Create creates a new container within a storage account. If a container with the same name already exists, the operation fails. @@ -272,7 +281,7 @@ func (c *Client) NewListBlobsFlatPager(o *ListBlobsFlatOptions) *runtime.Pager[L if err != nil { return ListBlobsFlatResponse{}, err } - resp, err := c.generated().Pipeline().Do(req) + resp, err := c.generated().InternalClient().Pipeline().Do(req) if err != nil { return ListBlobsFlatResponse{}, err } @@ -308,7 +317,7 @@ func (c *Client) NewListBlobsHierarchyPager(delimiter string, o *ListBlobsHierar if err != nil { return ListBlobsHierarchyResponse{}, err } - resp, err := c.generated().Pipeline().Do(req) + resp, err := c.generated().InternalClient().Pipeline().Do(req) if err != nil { return ListBlobsHierarchyResponse{}, err } @@ -412,3 +421,12 @@ func (c *Client) SubmitBatch(ctx context.Context, bb *BatchBuilder, options *Sub Version: resp.Version, }, nil } + +// FilterBlobs operation finds all blobs in the container whose tags match a given search expression. +// https://docs.microsoft.com/en-us/rest/api/storageservices/find-blobs-by-tags-container +// eg. 
"dog='germanshepherd' and penguin='emperorpenguin'" +func (c *Client) FilterBlobs(ctx context.Context, where string, o *FilterBlobsOptions) (FilterBlobsResponse, error) { + containerClientFilterBlobsOptions := o.format() + resp, err := c.generated().FilterBlobs(ctx, where, containerClientFilterBlobsOptions) + return resp, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go index f1724861e..61d936ab7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go @@ -397,3 +397,31 @@ type SubmitBatchOptions struct { func (o *SubmitBatchOptions) format() *generated.ContainerClientSubmitBatchOptions { return nil } + +// --------------------------------------------------------------------------------------------------------------------- + +// FilterBlobsOptions provides set of options for Client.FilterBlobs. +type FilterBlobsOptions struct { + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + MaxResults *int32 +} + +func (o *FilterBlobsOptions) format() *generated.ContainerClientFilterBlobsOptions { + if o == nil { + return nil + } + return &generated.ContainerClientFilterBlobsOptions{ + Marker: o.Marker, + Maxresults: o.MaxResults, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go index 4d1e406ea..9aaefe277 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/responses.go @@ -64,3 +64,6 @@ type SubmitBatchResponse struct { // BatchResponseItem contains the response for the individual sub-requests. type BatchResponseItem = exported.BatchResponseItem + +// FilterBlobsResponse contains the response from method Client.FilterBlobs. 
+type FilterBlobsResponse = generated.ContainerClientFilterBlobsResponse diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go index 6c3cc5c93..0bdbaefaf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base/clients.go @@ -8,7 +8,6 @@ package base import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported" "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated" ) @@ -44,23 +43,23 @@ func NewClient[T any](inner *T) *Client[T] { return &Client[T]{inner: inner} } -func NewServiceClient(containerURL string, pipeline runtime.Pipeline, credential any) *Client[generated.ServiceClient] { +func NewServiceClient(containerURL string, azClient *azcore.Client, credential any) *Client[generated.ServiceClient] { return &Client[generated.ServiceClient]{ - inner: generated.NewServiceClient(containerURL, pipeline), + inner: generated.NewServiceClient(containerURL, azClient), credential: credential, } } -func NewContainerClient(containerURL string, pipeline runtime.Pipeline, credential any) *Client[generated.ContainerClient] { +func NewContainerClient(containerURL string, azClient *azcore.Client, credential any) *Client[generated.ContainerClient] { return &Client[generated.ContainerClient]{ - inner: generated.NewContainerClient(containerURL, pipeline), + inner: generated.NewContainerClient(containerURL, azClient), credential: credential, } } -func NewBlobClient(blobURL string, pipeline runtime.Pipeline, credential any) *Client[generated.BlobClient] { +func NewBlobClient(blobURL string, azClient *azcore.Client, credential any) *Client[generated.BlobClient] { return &Client[generated.BlobClient]{ - inner: generated.NewBlobClient(blobURL, pipeline), + inner: generated.NewBlobClient(blobURL, azClient), credential: credential, } } @@ -75,26 +74,26 @@ func InnerClients[T, U any](client *CompositeClient[T, U]) (*Client[T], *U) { return &Client[T]{inner: client.innerT}, client.innerU } -func NewAppendBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.AppendBlobClient] { +func NewAppendBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.AppendBlobClient] { return &CompositeClient[generated.BlobClient, generated.AppendBlobClient]{ - innerT: generated.NewBlobClient(blobURL, pipeline), - innerU: generated.NewAppendBlobClient(blobURL, pipeline), + innerT: generated.NewBlobClient(blobURL, azClient), + innerU: generated.NewAppendBlobClient(blobURL, azClient), sharedKey: sharedKey, } } -func NewBlockBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.BlockBlobClient] { +func NewBlockBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.BlockBlobClient] { return &CompositeClient[generated.BlobClient, generated.BlockBlobClient]{ - innerT: generated.NewBlobClient(blobURL, pipeline), - innerU: generated.NewBlockBlobClient(blobURL, pipeline), + innerT: 
generated.NewBlobClient(blobURL, azClient), + innerU: generated.NewBlockBlobClient(blobURL, azClient), sharedKey: sharedKey, } } -func NewPageBlobClient(blobURL string, pipeline runtime.Pipeline, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.PageBlobClient] { +func NewPageBlobClient(blobURL string, azClient *azcore.Client, sharedKey *exported.SharedKeyCredential) *CompositeClient[generated.BlobClient, generated.PageBlobClient] { return &CompositeClient[generated.BlobClient, generated.PageBlobClient]{ - innerT: generated.NewBlobClient(blobURL, pipeline), - innerU: generated.NewPageBlobClient(blobURL, pipeline), + innerT: generated.NewBlobClient(blobURL, azClient), + innerU: generated.NewPageBlobClient(blobURL, azClient), sharedKey: sharedKey, } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go index d15631054..bd0bd5e26 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go @@ -172,7 +172,7 @@ func (c *SharedKeyCredential) buildCanonicalizedResource(u *url.URL) (string, er // Join the sorted key values separated by ',' // Then prepend "keyName:"; then add this string to the buffer - cr.WriteString("\n" + paramName + ":" + strings.Join(paramValues, ",")) + cr.WriteString("\n" + strings.ToLower(paramName) + ":" + strings.Join(paramValues, ",")) } } return cr.String(), nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go index f27f52983..935debca3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go @@ -8,5 +8,5 @@ package exported const ( ModuleName = "azblob" - ModuleVersion = "v1.1.0" + ModuleVersion = "v1.2.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go index 3b6184fea..288df7edd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/appendblob_client.go @@ -8,12 +8,25 @@ package generated -import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) func (client *AppendBlobClient) Endpoint() string { return client.endpoint } -func (client *AppendBlobClient) Pipeline() runtime.Pipeline { - return client.pl +func (client *AppendBlobClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewAppendBlobClient creates a new instance of AppendBlobClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. 
+func NewAppendBlobClient(endpoint string, azClient *azcore.Client) *AppendBlobClient { + client := &AppendBlobClient{ + internal: azClient, + endpoint: endpoint, + } + return client } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md index 3044a608a..367f020f4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md @@ -7,7 +7,7 @@ go: true clear-output-folder: false version: "^3.0.0" license-header: MICROSOFT_MIT_NO_VERSION -input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/e515b6251fdc21015282d2e84b85beec7c091763/specification/storage/data-plane/Microsoft.BlobStorage/preview/2020-10-02/blob.json" +input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/a32d0b2423d19835246bb2ef92941503bfd5e734/specification/storage/data-plane/Microsoft.BlobStorage/preview/2021-12-02/blob.json" credential-scope: "https://storage.azure.com/.default" output-folder: ../generated file-prefix: "zz_" @@ -19,7 +19,43 @@ modelerfour: seal-single-value-enum-by-default: true lenient-model-deduplication: true export-clients: true -use: "@autorest/go@4.0.0-preview.45" +use: "@autorest/go@4.0.0-preview.49" +``` + +### Updating service version to 2023-08-03 +```yaml +directive: +- from: + - zz_appendblob_client.go + - zz_blob_client.go + - zz_blockblob_client.go + - zz_container_client.go + - zz_pageblob_client.go + - zz_service_client.go + where: $ + transform: >- + return $. + replaceAll(`[]string{"2021-12-02"}`, `[]string{ServiceVersion}`). + replaceAll(`2021-12-02`, `2023-08-03`); +``` + +### Undo breaking change with BlobName +``` yaml +directive: +- from: zz_models.go + where: $ + transform: >- + return $. 
+ replace(/Name\s+\*BlobName/g, `Name *string`); +``` + +### Removing UnmarshalXML for BlobItems to create customer UnmarshalXML function +```yaml +directive: +- from: swagger-document + where: $.definitions + transform: > + $.BlobItemInternal["x-ms-go-omit-serde-methods"] = true; ``` ### Remove pager methods and export various generated methods in container client diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go index 478962de1..343073b2e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/blob_client.go @@ -8,8 +8,8 @@ package generated import ( "context" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "time" ) @@ -20,8 +20,8 @@ func (client *BlobClient) Endpoint() string { return client.endpoint } -func (client *BlobClient) Pipeline() runtime.Pipeline { - return client.pl +func (client *BlobClient) InternalClient() *azcore.Client { + return client.internal } func (client *BlobClient) DeleteCreateRequest(ctx context.Context, options *BlobClientDeleteOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { @@ -31,3 +31,14 @@ func (client *BlobClient) DeleteCreateRequest(ctx context.Context, options *Blob func (client *BlobClient) SetTierCreateRequest(ctx context.Context, tier AccessTier, options *BlobClientSetTierOptions, leaseAccessConditions *LeaseAccessConditions, modifiedAccessConditions *ModifiedAccessConditions) (*policy.Request, error) { return client.setTierCreateRequest(ctx, tier, options, leaseAccessConditions, modifiedAccessConditions) } + +// NewBlobClient creates a new instance of BlobClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. +func NewBlobClient(endpoint string, azClient *azcore.Client) *BlobClient { + client := &BlobClient{ + internal: azClient, + endpoint: endpoint, + } + return client +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go index a43e327ec..873d9a419 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/block_blob_client.go @@ -8,12 +8,25 @@ package generated -import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) func (client *BlockBlobClient) Endpoint() string { return client.endpoint } -func (client *BlockBlobClient) Pipeline() runtime.Pipeline { - return client.pl +func (client *BlockBlobClient) Internal() *azcore.Client { + return client.internal +} + +// NewBlockBlobClient creates a new instance of BlockBlobClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. 
It consists of a pipeline and tracing provider. +func NewBlockBlobClient(endpoint string, azClient *azcore.Client) *BlockBlobClient { + client := &BlockBlobClient{ + internal: azClient, + endpoint: endpoint, + } + return client } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go new file mode 100644 index 000000000..8c13c4411 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go @@ -0,0 +1,9 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package generated + +const ServiceVersion = "2023-08-03" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go index bbbf828a0..d43b2c782 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/container_client.go @@ -6,12 +6,25 @@ package generated -import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) func (client *ContainerClient) Endpoint() string { return client.endpoint } -func (client *ContainerClient) Pipeline() runtime.Pipeline { - return client.pl +func (client *ContainerClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewContainerClient creates a new instance of ContainerClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - pl - the pipeline used for sending requests and handling responses. +func NewContainerClient(endpoint string, azClient *azcore.Client) *ContainerClient { + client := &ContainerClient{ + internal: azClient, + endpoint: endpoint, + } + return client } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go index 759d92630..aaef9f53b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/models.go @@ -6,6 +6,12 @@ package generated +import ( + "encoding/xml" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "net/url" +) + type TransactionalContentSetter interface { SetCRC64([]byte) SetMD5([]byte) @@ -35,6 +41,14 @@ func (p *PageBlobClientUploadPagesOptions) SetMD5(v []byte) { p.TransactionalContentMD5 = v } +func (b *BlockBlobClientUploadOptions) SetCRC64(v []byte) { + b.TransactionalContentCRC64 = v +} + +func (b *BlockBlobClientUploadOptions) SetMD5(v []byte) { + b.TransactionalContentMD5 = v +} + type SourceContentSetter interface { SetSourceContentCRC64(v []byte) SetSourceContentMD5(v []byte) @@ -63,3 +77,65 @@ func (p *PageBlobClientUploadPagesFromURLOptions) SetSourceContentCRC64(v []byte func (p *PageBlobClientUploadPagesFromURLOptions) SetSourceContentMD5(v []byte) { p.SourceContentMD5 = v } + +// Custom UnmarshalXML functions for types that need special handling. 
+ +// UnmarshalXML implements the xml.Unmarshaller interface for type BlobPrefix. +func (b *BlobPrefix) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias BlobPrefix + aux := &struct { + *alias + BlobName *BlobName `xml:"Name"` + }{ + alias: (*alias)(b), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + if aux.BlobName != nil { + if aux.BlobName.Encoded != nil && *aux.BlobName.Encoded { + name, err := url.QueryUnescape(*aux.BlobName.Content) + + // name, err := base64.StdEncoding.DecodeString(*aux.BlobName.Content) + if err != nil { + return err + } + b.Name = to.Ptr(string(name)) + } else { + b.Name = aux.BlobName.Content + } + } + return nil +} + +// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItem. +func (b *BlobItem) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type alias BlobItem + aux := &struct { + *alias + BlobName *BlobName `xml:"Name"` + Metadata additionalProperties `xml:"Metadata"` + OrMetadata additionalProperties `xml:"OrMetadata"` + }{ + alias: (*alias)(b), + } + if err := dec.DecodeElement(aux, &start); err != nil { + return err + } + b.Metadata = (map[string]*string)(aux.Metadata) + b.OrMetadata = (map[string]*string)(aux.OrMetadata) + if aux.BlobName != nil { + if aux.BlobName.Encoded != nil && *aux.BlobName.Encoded { + name, err := url.QueryUnescape(*aux.BlobName.Content) + + // name, err := base64.StdEncoding.DecodeString(*aux.BlobName.Content) + if err != nil { + return err + } + b.Name = to.Ptr(string(name)) + } else { + b.Name = aux.BlobName.Content + } + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go index 8a212cc3d..a7c76208a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/pageblob_client.go @@ -6,12 +6,25 @@ package generated -import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) func (client *PageBlobClient) Endpoint() string { return client.endpoint } -func (client *PageBlobClient) Pipeline() runtime.Pipeline { - return client.pl +func (client *PageBlobClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewPageBlobClient creates a new instance of PageBlobClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. 
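The two UnmarshalXML helpers above (for BlobPrefix and BlobItem) exist because the newer listing schema may return blob names as a `<Name Encoded="true">…</Name>` element whose text is URL-encoded (hence the url.QueryUnescape call); when the Encoded flag is set, the name is unescaped before being exposed as a plain *string. A self-contained sketch of the same alias-struct decoding pattern, using stand-in types; blobName and item are illustrative, not the generated models:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"net/url"
)

// blobName mirrors the shape of an XML element whose text content may be
// URL-encoded, signalled by the Encoded attribute.
type blobName struct {
	Encoded *bool   `xml:"Encoded,attr"`
	Content *string `xml:",chardata"`
}

type item struct {
	Name *string
}

// UnmarshalXML decodes into an alias type (so default decoding still applies
// and the method does not recurse), then unescapes the name if it was encoded.
func (i *item) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error {
	type alias item
	aux := &struct {
		*alias
		BlobName *blobName `xml:"Name"`
	}{alias: (*alias)(i)}
	if err := dec.DecodeElement(aux, &start); err != nil {
		return err
	}
	if aux.BlobName != nil {
		if aux.BlobName.Encoded != nil && *aux.BlobName.Encoded && aux.BlobName.Content != nil {
			name, err := url.QueryUnescape(*aux.BlobName.Content)
			if err != nil {
				return err
			}
			i.Name = &name
		} else {
			i.Name = aux.BlobName.Content
		}
	}
	return nil
}

func main() {
	var it item
	if err := xml.Unmarshal([]byte(`<Blob><Name Encoded="true">dir%2Ffile.txt</Name></Blob>`), &it); err != nil {
		panic(err)
	}
	fmt.Println(*it.Name) // dir/file.txt
}
```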
+func NewPageBlobClient(endpoint string, azClient *azcore.Client) *PageBlobClient { + client := &PageBlobClient{ + internal: azClient, + endpoint: endpoint, + } + return client } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go index 1f449b955..32c15a2b0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/service_client.go @@ -6,12 +6,25 @@ package generated -import "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +import ( + "github.com/Azure/azure-sdk-for-go/sdk/azcore" +) func (client *ServiceClient) Endpoint() string { return client.endpoint } -func (client *ServiceClient) Pipeline() runtime.Pipeline { - return client.pl +func (client *ServiceClient) InternalClient() *azcore.Client { + return client.internal +} + +// NewServiceClient creates a new instance of ServiceClient with the specified values. +// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. +// - azClient - azcore.Client is a basic HTTP client. It consists of a pipeline and tracing provider. +func NewServiceClient(endpoint string, azClient *azcore.Client) *ServiceClient { + client := &ServiceClient{ + internal: azClient, + endpoint: endpoint, + } + return client } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go index d0ddb9064..32be22221 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go @@ -22,21 +22,10 @@ import ( ) // AppendBlobClient contains the methods for the AppendBlob group. -// Don't use this type directly, use NewAppendBlobClient() instead. +// Don't use this type directly, use a constructor function instead. type AppendBlobClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewAppendBlobClient creates a new instance of AppendBlobClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// - pl - the pipeline used for sending requests and handling responses. -func NewAppendBlobClient(endpoint string, pl runtime.Pipeline) *AppendBlobClient { - client := &AppendBlobClient{ - endpoint: endpoint, - pl: pl, - } - return client } // AppendBlock - The Append Block operation commits a new block of data to the end of an existing append blob. The Append @@ -44,7 +33,7 @@ func NewAppendBlobClient(endpoint string, pl runtime.Pipeline) *AppendBlobClient // AppendBlob. Append Block is supported only on version 2015-02-21 version or later. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - contentLength - The length of the request. // - body - Initial data // - options - AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method. 
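From here on, the bulk of this patch is one mechanical migration in the regenerated clients: the struct field `pl runtime.Pipeline` becomes `internal *azcore.Client`, every `client.pl.Do(req)` becomes `client.internal.Pipeline().Do(req)`, and the hand-written constructors accept the `*azcore.Client` instead of a pipeline. A condensed sketch of the before/after shape; `doSomething` is a placeholder operation, not an SDK method:

```go
// Before (azblob v1.1.0 shape): the generated client stored the pipeline.
//
//	type BlobClient struct {
//		endpoint string
//		pl       runtime.Pipeline
//	}
//	resp, err := client.pl.Do(req)
//
// After (azblob v1.2.0 shape): the client stores an *azcore.Client and the
// pipeline is reached through it, which also carries tracing configuration.
package generatedsketch

import (
	"context"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

type BlobClient struct {
	internal *azcore.Client
	endpoint string
}

func NewBlobClient(endpoint string, azClient *azcore.Client) *BlobClient {
	return &BlobClient{internal: azClient, endpoint: endpoint}
}

// doSomething is a placeholder operation showing how requests are sent now.
func (c *BlobClient) doSomething(ctx context.Context) (*http.Response, error) {
	req, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint)
	if err != nil {
		return nil, err
	}
	return c.internal.Pipeline().Do(req)
}
```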
@@ -59,7 +48,7 @@ func (client *AppendBlobClient) AppendBlock(ctx context.Context, contentLength i if err != nil { return AppendBlobClientAppendBlockResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientAppendBlockResponse{}, err } @@ -124,12 +113,15 @@ func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, co if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, "application/octet-stream") + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil } // appendBlockHandleResponse handles the AppendBlock response. @@ -207,7 +199,7 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) ( // created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - sourceURL - Specify a URL to the copy source. // - contentLength - The length of the request. // - options - AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL @@ -225,7 +217,7 @@ func (client *AppendBlobClient) AppendBlockFromURL(ctx context.Context, sourceUR if err != nil { return AppendBlobClientAppendBlockFromURLResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientAppendBlockFromURLResponse{}, err } @@ -309,7 +301,7 @@ func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -390,7 +382,7 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp // Create - The Create Append Blob operation creates a new append blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - contentLength - The length of the request. // - options - AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method. // - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. 
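Two other recurring edits appear in the hunk above and repeat through the remaining generated files: the `x-ms-version` header is now taken from the `ServiceVersion` constant ("2023-08-03", added in constants.go earlier in this patch) instead of a `"2020-10-02"` literal, and request builders return `(nil, err)` when `SetBody` fails rather than pairing the error with a half-built request. A small illustrative builder combining both patterns; uploadCreateRequest is hypothetical, not a generated function:

```go
package generatedsketch

import (
	"context"
	"io"
	"net/http"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
)

// ServiceVersion mirrors the constant added in constants.go; the generated
// request builders reference it instead of repeating a version literal.
const ServiceVersion = "2023-08-03"

// uploadCreateRequest is an illustrative request builder showing the two
// mechanical changes repeated across this patch: the version header comes
// from ServiceVersion, and SetBody failures are returned as (nil, err).
func uploadCreateRequest(ctx context.Context, endpoint string, body io.ReadSeekCloser) (*policy.Request, error) {
	req, err := runtime.NewRequest(ctx, http.MethodPut, endpoint)
	if err != nil {
		return nil, err
	}
	req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
	req.Raw().Header["Accept"] = []string{"application/xml"}
	if err := req.SetBody(body, "application/octet-stream"); err != nil {
		return nil, err
	}
	return req, nil
}
```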
@@ -403,7 +395,7 @@ func (client *AppendBlobClient) Create(ctx context.Context, contentLength int64, if err != nil { return AppendBlobClientCreateResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientCreateResponse{}, err } @@ -481,7 +473,7 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -560,7 +552,7 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen // or later. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. @@ -571,7 +563,7 @@ func (client *AppendBlobClient) Seal(ctx context.Context, options *AppendBlobCli if err != nil { return AppendBlobClientSealResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return AppendBlobClientSealResponse{}, err } @@ -593,7 +585,7 @@ func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options * reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go index 67402b5a5..257a3656d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go @@ -23,28 +23,17 @@ import ( ) // BlobClient contains the methods for the Blob group. -// Don't use this type directly, use NewBlobClient() instead. +// Don't use this type directly, use a constructor function instead. type BlobClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewBlobClient creates a new instance of BlobClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// - pl - the pipeline used for sending requests and handling responses. 
-func NewBlobClient(endpoint string, pl runtime.Pipeline) *BlobClient { - client := &BlobClient{ - endpoint: endpoint, - pl: pl, - } - return client } // AbortCopyFromURL - The Abort Copy From URL operation aborts a pending Copy From URL operation, and leaves a destination // blob with zero length and full metadata. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation. // - options - BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. @@ -53,7 +42,7 @@ func (client *BlobClient) AbortCopyFromURL(ctx context.Context, copyID string, o if err != nil { return BlobClientAbortCopyFromURLResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientAbortCopyFromURLResponse{}, err } @@ -80,7 +69,7 @@ func (client *BlobClient) abortCopyFromURLCreateRequest(ctx context.Context, cop if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -113,7 +102,7 @@ func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (B // AcquireLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite // lease can be between 15 and 60 seconds. A lease duration cannot be changed using // renew or change. @@ -124,7 +113,7 @@ func (client *BlobClient) AcquireLease(ctx context.Context, duration int32, opti if err != nil { return BlobClientAcquireLeaseResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientAcquireLeaseResponse{}, err } @@ -166,7 +155,7 @@ func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, duratio if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -212,7 +201,7 @@ func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobC // BreakLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. func (client *BlobClient) BreakLease(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientBreakLeaseResponse, error) { @@ -220,7 +209,7 @@ func (client *BlobClient) BreakLease(ctx context.Context, options *BlobClientBre if err != nil { return BlobClientBreakLeaseResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientBreakLeaseResponse{}, err } @@ -261,7 +250,7 @@ func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options * if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -312,7 +301,7 @@ func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobCli // ChangeLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - leaseID - Specifies the current lease ID on the resource. // - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed // lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID @@ -324,7 +313,7 @@ func (client *BlobClient) ChangeLease(ctx context.Context, leaseID string, propo if err != nil { return BlobClientChangeLeaseResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientChangeLeaseResponse{}, err } @@ -364,7 +353,7 @@ func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -411,7 +400,7 @@ func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobCl // until the copy is complete. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies // a page blob snapshot. The value should be URL-encoded as it would appear in a request // URI. The source blob must either be public or must be authenticated via a shared access signature. @@ -420,12 +409,13 @@ func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobCl // method. 
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. -func (client *BlobClient) CopyFromURL(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (BlobClientCopyFromURLResponse, error) { - req, err := client.copyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions) +// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. +func (client *BlobClient) CopyFromURL(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions, cpkScopeInfo *CPKScopeInfo) (BlobClientCopyFromURLResponse, error) { + req, err := client.copyFromURLCreateRequest(ctx, copySource, options, sourceModifiedAccessConditions, modifiedAccessConditions, leaseAccessConditions, cpkScopeInfo) if err != nil { return BlobClientCopyFromURLResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientCopyFromURLResponse{}, err } @@ -436,7 +426,7 @@ func (client *BlobClient) CopyFromURL(ctx context.Context, copySource string, op } // copyFromURLCreateRequest creates the CopyFromURL request. -func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions) (*policy.Request, error) { +func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySource string, options *BlobClientCopyFromURLOptions, sourceModifiedAccessConditions *SourceModifiedAccessConditions, modifiedAccessConditions *ModifiedAccessConditions, leaseAccessConditions *LeaseAccessConditions, cpkScopeInfo *CPKScopeInfo) (*policy.Request, error) { req, err := runtime.NewRequest(ctx, http.MethodPut, client.endpoint) if err != nil { return nil, err @@ -488,7 +478,7 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -510,6 +500,12 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour if options != nil && options.CopySourceAuthorization != nil { req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} } + if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { + req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} + } + if options != nil && options.CopySourceTags != nil { + req.Raw().Header["x-ms-copy-source-tag-option"] = 
[]string{string(*options.CopySourceTags)} + } req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -566,13 +562,16 @@ func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobCl } result.ContentCRC64 = contentCRC64 } + if val := resp.Header.Get("x-ms-encryption-scope"); val != "" { + result.EncryptionScope = &val + } return result, nil } // CreateSnapshot - The Create Snapshot operation creates a read-only snapshot of a blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. // - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method. @@ -583,7 +582,7 @@ func (client *BlobClient) CreateSnapshot(ctx context.Context, options *BlobClien if err != nil { return BlobClientCreateSnapshotResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientCreateSnapshotResponse{}, err } @@ -642,7 +641,7 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -708,7 +707,7 @@ func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (Blo // return an HTTP status code of 404 (ResourceNotFound). // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. @@ -717,7 +716,7 @@ func (client *BlobClient) Delete(ctx context.Context, options *BlobClientDeleteO if err != nil { return BlobClientDeleteResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientDeleteResponse{}, err } @@ -768,7 +767,7 @@ func (client *BlobClient) deleteCreateRequest(ctx context.Context, options *Blob if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -801,7 +800,7 @@ func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientD // DeleteImmutabilityPolicy - The Delete Immutability Policy operation deletes the immutability policy on the blob // If the operation fails it returns an *azcore.ResponseError type. 
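The download hunk above also starts surfacing the blob's creation time: `x-ms-creation-time` is an RFC 1123 timestamp and is parsed into the response only when the header is present, the same guarded-parse shape already used for Last-Modified. (The CopyFromURL hunks just before it similarly add an optional CPKScopeInfo parameter plus the x-ms-encryption-scope and x-ms-copy-source-tag-option request headers and an EncryptionScope response field.) A minimal sketch of the header-parsing step, written as a free function rather than the generated response handler:

```go
package generatedsketch

import (
	"net/http"
	"time"
)

// creationTime mirrors the handling added to downloadHandleResponse: the
// x-ms-creation-time header is an RFC 1123 timestamp, parsed only when
// present. This is an illustrative helper, not the SDK's response handler.
func creationTime(resp *http.Response) (*time.Time, error) {
	val := resp.Header.Get("x-ms-creation-time")
	if val == "" {
		return nil, nil
	}
	t, err := time.Parse(time.RFC1123, val)
	if err != nil {
		return nil, err
	}
	return &t, nil
}
```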
// -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy // method. func (client *BlobClient) DeleteImmutabilityPolicy(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (BlobClientDeleteImmutabilityPolicyResponse, error) { @@ -809,7 +808,7 @@ func (client *BlobClient) DeleteImmutabilityPolicy(ctx context.Context, options if err != nil { return BlobClientDeleteImmutabilityPolicyResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientDeleteImmutabilityPolicyResponse{}, err } @@ -831,7 +830,7 @@ func (client *BlobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -865,7 +864,7 @@ func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp // can also call Download to read a snapshot. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. @@ -875,7 +874,7 @@ func (client *BlobClient) Download(ctx context.Context, options *BlobClientDownl if err != nil { return BlobClientDownloadResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientDownloadResponse{}, err } @@ -939,7 +938,7 @@ func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *Bl if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -957,6 +956,13 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien } result.LastModified = &lastModified } + if val := resp.Header.Get("x-ms-creation-time"); val != "" { + creationTime, err := time.Parse(time.RFC1123, val) + if err != nil { + return BlobClientDownloadResponse{}, err + } + result.CreationTime = &creationTime + } for hh := range resp.Header { if len(hh) > len("x-ms-meta-") && strings.EqualFold(hh[:len("x-ms-meta-")], "x-ms-meta-") { if result.Metadata == nil { @@ -1163,14 +1169,14 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. 
// -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method. func (client *BlobClient) GetAccountInfo(ctx context.Context, options *BlobClientGetAccountInfoOptions) (BlobClientGetAccountInfoResponse, error) { req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { return BlobClientGetAccountInfoResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientGetAccountInfoResponse{}, err } @@ -1190,7 +1196,7 @@ func (client *BlobClient) getAccountInfoCreateRequest(ctx context.Context, optio reqQP.Set("restype", "account") reqQP.Set("comp", "properties") req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -1227,7 +1233,7 @@ func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (Blo // for the blob. It does not return the content of the blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. @@ -1237,7 +1243,7 @@ func (client *BlobClient) GetProperties(ctx context.Context, options *BlobClient if err != nil { return BlobClientGetPropertiesResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientGetPropertiesResponse{}, err } @@ -1291,7 +1297,7 @@ func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, option if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1542,7 +1548,7 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob // GetTags - The Get Tags operation enables users to get the tags associated with a blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
@@ -1551,7 +1557,7 @@ func (client *BlobClient) GetTags(ctx context.Context, options *BlobClientGetTag if err != nil { return BlobClientGetTagsResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientGetTagsResponse{}, err } @@ -1579,7 +1585,7 @@ func (client *BlobClient) getTagsCreateRequest(ctx context.Context, options *Blo reqQP.Set("versionid", *options.VersionID) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1621,7 +1627,7 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient // Query - The Query operation enables users to select/project on blob data by providing simple query expressions. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. @@ -1631,7 +1637,7 @@ func (client *BlobClient) Query(ctx context.Context, options *BlobClientQueryOpt if err != nil { return BlobClientQueryResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientQueryResponse{}, err } @@ -1684,13 +1690,16 @@ func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobC if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} if options != nil && options.QueryRequest != nil { - return req, runtime.MarshalAsXML(req, *options.QueryRequest) + if err := runtime.MarshalAsXML(req, *options.QueryRequest); err != nil { + return nil, err + } + return req, nil } return req, nil } @@ -1849,7 +1858,7 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu // ReleaseLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - leaseID - Specifies the current lease ID on the resource. // - options - BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
@@ -1858,7 +1867,7 @@ func (client *BlobClient) ReleaseLease(ctx context.Context, leaseID string, opti if err != nil { return BlobClientReleaseLeaseResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientReleaseLeaseResponse{}, err } @@ -1897,7 +1906,7 @@ func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1940,7 +1949,7 @@ func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobC // RenewLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - leaseID - Specifies the current lease ID on the resource. // - options - BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. @@ -1949,7 +1958,7 @@ func (client *BlobClient) RenewLease(ctx context.Context, leaseID string, option if err != nil { return BlobClientRenewLeaseResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientRenewLeaseResponse{}, err } @@ -1988,7 +1997,7 @@ func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID s if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2034,7 +2043,7 @@ func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobCli // SetExpiry - Sets the time a blob will expire and be deleted. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - expiryOptions - Required. Indicates mode of the expiry time // - options - BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method. 
func (client *BlobClient) SetExpiry(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (BlobClientSetExpiryResponse, error) { @@ -2042,7 +2051,7 @@ func (client *BlobClient) SetExpiry(ctx context.Context, expiryOptions ExpiryOpt if err != nil { return BlobClientSetExpiryResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetExpiryResponse{}, err } @@ -2064,7 +2073,7 @@ func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOpti reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2111,7 +2120,7 @@ func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClie // SetHTTPHeaders - The Set HTTP Headers operation sets system properties on the blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method. // - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. @@ -2121,7 +2130,7 @@ func (client *BlobClient) SetHTTPHeaders(ctx context.Context, options *BlobClien if err != nil { return BlobClientSetHTTPHeadersResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetHTTPHeadersResponse{}, err } @@ -2179,7 +2188,7 @@ func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, optio if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil { req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2229,7 +2238,7 @@ func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (Blo // SetImmutabilityPolicy - The Set Immutability Policy operation sets the immutability policy on the blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy // method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
@@ -2238,7 +2247,7 @@ func (client *BlobClient) SetImmutabilityPolicy(ctx context.Context, options *Bl if err != nil { return BlobClientSetImmutabilityPolicyResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetImmutabilityPolicyResponse{}, err } @@ -2260,7 +2269,7 @@ func (client *BlobClient) setImmutabilityPolicyCreateRequest(ctx context.Context reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2312,7 +2321,7 @@ func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Respons // SetLegalHold - The Set Legal Hold operation sets a legal hold on the blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - legalHold - Specified if a legal hold should be set on the blob. // - options - BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method. func (client *BlobClient) SetLegalHold(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (BlobClientSetLegalHoldResponse, error) { @@ -2320,7 +2329,7 @@ func (client *BlobClient) SetLegalHold(ctx context.Context, legalHold bool, opti if err != nil { return BlobClientSetLegalHoldResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetLegalHoldResponse{}, err } @@ -2342,7 +2351,7 @@ func (client *BlobClient) setLegalHoldCreateRequest(ctx context.Context, legalHo reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2384,7 +2393,7 @@ func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobC // pairs // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method. 
@@ -2395,7 +2404,7 @@ func (client *BlobClient) SetMetadata(ctx context.Context, options *BlobClientSe if err != nil { return BlobClientSetMetadataResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetMetadataResponse{}, err } @@ -2454,7 +2463,7 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2513,7 +2522,7 @@ func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobCl // SetTags - The Set Tags operation enables users to set tags on a blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - tags - Blob tags // - options - BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. @@ -2523,7 +2532,7 @@ func (client *BlobClient) SetTags(ctx context.Context, tags BlobTags, options *B if err != nil { return BlobClientSetTagsResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetTagsResponse{}, err } @@ -2548,7 +2557,7 @@ func (client *BlobClient) setTagsCreateRequest(ctx context.Context, tags BlobTag reqQP.Set("versionid", *options.VersionID) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.TransactionalContentMD5 != nil { req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)} } @@ -2565,7 +2574,10 @@ func (client *BlobClient) setTagsCreateRequest(ctx context.Context, tags BlobTag req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, runtime.MarshalAsXML(req, tags) + if err := runtime.MarshalAsXML(req, tags); err != nil { + return nil, err + } + return req, nil } // setTagsHandleResponse handles the SetTags response. @@ -2596,7 +2608,7 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient // storage type. This operation does not update the blob's ETag. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - tier - Indicates the tier to be set on the blob. // - options - BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
@@ -2606,7 +2618,7 @@ func (client *BlobClient) SetTier(ctx context.Context, tier AccessTier, options if err != nil { return BlobClientSetTierResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientSetTierResponse{}, err } @@ -2638,7 +2650,7 @@ func (client *BlobClient) setTierCreateRequest(ctx context.Context, tier AccessT if options != nil && options.RehydratePriority != nil { req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2670,7 +2682,7 @@ func (client *BlobClient) setTierHandleResponse(resp *http.Response) (BlobClient // StartCopyFromURL - The Start Copy From URL operation copies a blob or an internet resource to a new blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies // a page blob snapshot. The value should be URL-encoded as it would appear in a request // URI. The source blob must either be public or must be authenticated via a shared access signature. @@ -2684,7 +2696,7 @@ func (client *BlobClient) StartCopyFromURL(ctx context.Context, copySource strin if err != nil { return BlobClientStartCopyFromURLResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientStartCopyFromURLResponse{}, err } @@ -2752,7 +2764,7 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -2819,14 +2831,14 @@ func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (B // Undelete - Undelete a blob that was previously soft deleted // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method. 
func (client *BlobClient) Undelete(ctx context.Context, options *BlobClientUndeleteOptions) (BlobClientUndeleteResponse, error) { req, err := client.undeleteCreateRequest(ctx, options) if err != nil { return BlobClientUndeleteResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlobClientUndeleteResponse{}, err } @@ -2848,7 +2860,7 @@ func (client *BlobClient) undeleteCreateRequest(ctx context.Context, options *Bl reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go index 7e81d1540..6b2def53f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go @@ -22,21 +22,10 @@ import ( ) // BlockBlobClient contains the methods for the BlockBlob group. -// Don't use this type directly, use NewBlockBlobClient() instead. +// Don't use this type directly, use a constructor function instead. type BlockBlobClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewBlockBlobClient creates a new instance of BlockBlobClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// - pl - the pipeline used for sending requests and handling responses. -func NewBlockBlobClient(endpoint string, pl runtime.Pipeline) *BlockBlobClient { - client := &BlockBlobClient{ - endpoint: endpoint, - pl: pl, - } - return client } // CommitBlockList - The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob. @@ -48,7 +37,7 @@ func NewBlockBlobClient(endpoint string, pl runtime.Pipeline) *BlockBlobClient { // belong to. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - blocks - Blob Blocks. // - options - BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList // method. 
@@ -62,7 +51,7 @@ func (client *BlockBlobClient) CommitBlockList(ctx context.Context, blocks Block if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientCommitBlockListResponse{}, err } @@ -148,7 +137,7 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -165,7 +154,10 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context, req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, runtime.MarshalAsXML(req, blocks) + if err := runtime.MarshalAsXML(req, blocks); err != nil { + return nil, err + } + return req, nil } // commitBlockListHandleResponse handles the CommitBlockList response. @@ -233,7 +225,7 @@ func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response // GetBlockList - The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together. // - options - BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. @@ -243,7 +235,7 @@ func (client *BlockBlobClient) GetBlockList(ctx context.Context, listType BlockL if err != nil { return BlockBlobClientGetBlockListResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientGetBlockListResponse{}, err } @@ -275,7 +267,7 @@ func (client *BlockBlobClient) getBlockListCreateRequest(ctx context.Context, li if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -335,7 +327,7 @@ func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) ( // Block from URL API in conjunction with Put Block List. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - contentLength - The length of the request. // - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies // a page blob snapshot. 
The value should be URL-encoded as it would appear in a request @@ -354,7 +346,7 @@ func (client *BlockBlobClient) PutBlobFromURL(ctx context.Context, contentLength if err != nil { return BlockBlobClientPutBlobFromURLResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientPutBlobFromURLResponse{}, err } @@ -453,7 +445,7 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil { req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -470,6 +462,9 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context, if options != nil && options.CopySourceAuthorization != nil { req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization} } + if options != nil && options.CopySourceTags != nil { + req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)} + } req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -532,7 +527,7 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response) // StageBlock - The Stage Block operation creates a new block to be committed as part of a blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal // to 64 bytes in size. For a given blob, the length of the value specified for the blockid // parameter must be the same size for each block. @@ -547,7 +542,7 @@ func (client *BlockBlobClient) StageBlock(ctx context.Context, blockID string, c if err != nil { return BlockBlobClientStageBlockResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientStageBlockResponse{}, err } @@ -592,12 +587,15 @@ func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, bloc if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil { req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, "application/octet-stream") + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil } // stageBlockHandleResponse handles the StageBlock response. @@ -653,7 +651,7 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl // are read from a URL. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - blockID - A valid Base64 string value that identifies the block. 
Prior to encoding, the string must be less than or equal // to 64 bytes in size. For a given blob, the length of the value specified for the blockid // parameter must be the same size for each block. @@ -671,7 +669,7 @@ func (client *BlockBlobClient) StageBlockFromURL(ctx context.Context, blockID st if err != nil { return BlockBlobClientStageBlockFromURLResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientStageBlockFromURLResponse{}, err } @@ -732,7 +730,7 @@ func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -798,7 +796,7 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon // the content of a block blob, use the Put Block List operation. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - contentLength - The length of the request. // - body - Initial data // - options - BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method. @@ -812,7 +810,7 @@ func (client *BlockBlobClient) Upload(ctx context.Context, contentLength int64, if err != nil { return BlockBlobClientUploadResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return BlockBlobClientUploadResponse{}, err } @@ -896,7 +894,7 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -912,8 +910,14 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL if options != nil && options.LegalHold != nil { req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)} } + if options != nil && options.TransactionalContentCRC64 != nil { + req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)} + } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, "application/octet-stream") + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil } // uploadHandleResponse handles the Upload response. 
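The create-request helpers in the hunks above also change how marshalling and body-setting failures are reported: instead of returning the request together with the error from runtime.MarshalAsXML or req.SetBody, they now check the error explicitly and return a nil request on failure. A minimal, self-contained illustration of the difference in shape (the request type and marshal function are stand-ins, not the SDK's):

package example

import "errors"

type exampleRequest struct{ body []byte }

// exampleMarshal stands in for runtime.MarshalAsXML / req.SetBody.
func exampleMarshal(r *exampleRequest, v []byte) error {
	if v == nil {
		return errors.New("nothing to marshal")
	}
	r.body = v
	return nil
}

// Old shape: the request pointer is returned even when marshalling failed,
// so callers must remember to check err before touching req.
func createRequestOld(v []byte) (*exampleRequest, error) {
	req := &exampleRequest{}
	return req, exampleMarshal(req, v)
}

// New shape: a marshalling failure yields (nil, err), so a non-nil request
// always carries a fully prepared body.
func createRequestNew(v []byte) (*exampleRequest, error) {
	req := &exampleRequest{}
	if err := exampleMarshal(req, v); err != nil {
		return nil, err
	}
	return req, nil
}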
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go index 74e6cf1e8..b9d306cac 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go @@ -13,6 +13,7 @@ type AccessTier string const ( AccessTierArchive AccessTier = "Archive" + AccessTierCold AccessTier = "Cold" AccessTierCool AccessTier = "Cool" AccessTierHot AccessTier = "Hot" AccessTierP10 AccessTier = "P10" @@ -33,6 +34,7 @@ const ( func PossibleAccessTierValues() []AccessTier { return []AccessTier{ AccessTierArchive, + AccessTierCold, AccessTierCool, AccessTierHot, AccessTierP10, @@ -53,27 +55,28 @@ func PossibleAccessTierValues() []AccessTier { type AccountKind string const ( - AccountKindStorage AccountKind = "Storage" AccountKindBlobStorage AccountKind = "BlobStorage" - AccountKindStorageV2 AccountKind = "StorageV2" - AccountKindFileStorage AccountKind = "FileStorage" AccountKindBlockBlobStorage AccountKind = "BlockBlobStorage" + AccountKindFileStorage AccountKind = "FileStorage" + AccountKindStorage AccountKind = "Storage" + AccountKindStorageV2 AccountKind = "StorageV2" ) // PossibleAccountKindValues returns the possible values for the AccountKind const type. func PossibleAccountKindValues() []AccountKind { return []AccountKind{ - AccountKindStorage, AccountKindBlobStorage, - AccountKindStorageV2, - AccountKindFileStorage, AccountKindBlockBlobStorage, + AccountKindFileStorage, + AccountKindStorage, + AccountKindStorageV2, } } type ArchiveStatus string const ( + ArchiveStatusRehydratePendingToCold ArchiveStatus = "rehydrate-pending-to-cold" ArchiveStatusRehydratePendingToCool ArchiveStatus = "rehydrate-pending-to-cool" ArchiveStatusRehydratePendingToHot ArchiveStatus = "rehydrate-pending-to-hot" ) @@ -81,25 +84,41 @@ const ( // PossibleArchiveStatusValues returns the possible values for the ArchiveStatus const type. func PossibleArchiveStatusValues() []ArchiveStatus { return []ArchiveStatus{ + ArchiveStatusRehydratePendingToCold, ArchiveStatusRehydratePendingToCool, ArchiveStatusRehydratePendingToHot, } } +type BlobCopySourceTags string + +const ( + BlobCopySourceTagsCOPY BlobCopySourceTags = "COPY" + BlobCopySourceTagsREPLACE BlobCopySourceTags = "REPLACE" +) + +// PossibleBlobCopySourceTagsValues returns the possible values for the BlobCopySourceTags const type. +func PossibleBlobCopySourceTagsValues() []BlobCopySourceTags { + return []BlobCopySourceTags{ + BlobCopySourceTagsCOPY, + BlobCopySourceTagsREPLACE, + } +} + // BlobGeoReplicationStatus - The status of the secondary location type BlobGeoReplicationStatus string const ( - BlobGeoReplicationStatusLive BlobGeoReplicationStatus = "live" BlobGeoReplicationStatusBootstrap BlobGeoReplicationStatus = "bootstrap" + BlobGeoReplicationStatusLive BlobGeoReplicationStatus = "live" BlobGeoReplicationStatusUnavailable BlobGeoReplicationStatus = "unavailable" ) // PossibleBlobGeoReplicationStatusValues returns the possible values for the BlobGeoReplicationStatus const type. 
func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus { return []BlobGeoReplicationStatus{ - BlobGeoReplicationStatusLive, BlobGeoReplicationStatusBootstrap, + BlobGeoReplicationStatusLive, BlobGeoReplicationStatusUnavailable, } } @@ -107,53 +126,53 @@ func PossibleBlobGeoReplicationStatusValues() []BlobGeoReplicationStatus { type BlobType string const ( + BlobTypeAppendBlob BlobType = "AppendBlob" BlobTypeBlockBlob BlobType = "BlockBlob" BlobTypePageBlob BlobType = "PageBlob" - BlobTypeAppendBlob BlobType = "AppendBlob" ) // PossibleBlobTypeValues returns the possible values for the BlobType const type. func PossibleBlobTypeValues() []BlobType { return []BlobType{ + BlobTypeAppendBlob, BlobTypeBlockBlob, BlobTypePageBlob, - BlobTypeAppendBlob, } } type BlockListType string const ( + BlockListTypeAll BlockListType = "all" BlockListTypeCommitted BlockListType = "committed" BlockListTypeUncommitted BlockListType = "uncommitted" - BlockListTypeAll BlockListType = "all" ) // PossibleBlockListTypeValues returns the possible values for the BlockListType const type. func PossibleBlockListTypeValues() []BlockListType { return []BlockListType{ + BlockListTypeAll, BlockListTypeCommitted, BlockListTypeUncommitted, - BlockListTypeAll, } } type CopyStatusType string const ( - CopyStatusTypePending CopyStatusType = "pending" - CopyStatusTypeSuccess CopyStatusType = "success" CopyStatusTypeAborted CopyStatusType = "aborted" CopyStatusTypeFailed CopyStatusType = "failed" + CopyStatusTypePending CopyStatusType = "pending" + CopyStatusTypeSuccess CopyStatusType = "success" ) // PossibleCopyStatusTypeValues returns the possible values for the CopyStatusType const type. func PossibleCopyStatusTypeValues() []CopyStatusType { return []CopyStatusType{ - CopyStatusTypePending, - CopyStatusTypeSuccess, CopyStatusTypeAborted, CopyStatusTypeFailed, + CopyStatusTypePending, + CopyStatusTypeSuccess, } } @@ -190,15 +209,15 @@ func PossibleDeleteTypeValues() []DeleteType { type EncryptionAlgorithmType string const ( - EncryptionAlgorithmTypeNone EncryptionAlgorithmType = "None" EncryptionAlgorithmTypeAES256 EncryptionAlgorithmType = "AES256" + EncryptionAlgorithmTypeNone EncryptionAlgorithmType = "None" ) // PossibleEncryptionAlgorithmTypeValues returns the possible values for the EncryptionAlgorithmType const type. func PossibleEncryptionAlgorithmTypeValues() []EncryptionAlgorithmType { return []EncryptionAlgorithmType{ - EncryptionAlgorithmTypeNone, EncryptionAlgorithmTypeAES256, + EncryptionAlgorithmTypeNone, } } @@ -221,50 +240,65 @@ func PossibleExpiryOptionsValues() []ExpiryOptions { } } +type FilterBlobsIncludeItem string + +const ( + FilterBlobsIncludeItemNone FilterBlobsIncludeItem = "none" + FilterBlobsIncludeItemVersions FilterBlobsIncludeItem = "versions" +) + +// PossibleFilterBlobsIncludeItemValues returns the possible values for the FilterBlobsIncludeItem const type. +func PossibleFilterBlobsIncludeItemValues() []FilterBlobsIncludeItem { + return []FilterBlobsIncludeItem{ + FilterBlobsIncludeItemNone, + FilterBlobsIncludeItemVersions, + } +} + type ImmutabilityPolicyMode string const ( + ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = "Locked" ImmutabilityPolicyModeMutable ImmutabilityPolicyMode = "Mutable" ImmutabilityPolicyModeUnlocked ImmutabilityPolicyMode = "Unlocked" - ImmutabilityPolicyModeLocked ImmutabilityPolicyMode = "Locked" ) // PossibleImmutabilityPolicyModeValues returns the possible values for the ImmutabilityPolicyMode const type. 
func PossibleImmutabilityPolicyModeValues() []ImmutabilityPolicyMode { return []ImmutabilityPolicyMode{ + ImmutabilityPolicyModeLocked, ImmutabilityPolicyModeMutable, ImmutabilityPolicyModeUnlocked, - ImmutabilityPolicyModeLocked, } } type ImmutabilityPolicySetting string const ( - ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = "Unlocked" ImmutabilityPolicySettingLocked ImmutabilityPolicySetting = "Locked" + ImmutabilityPolicySettingUnlocked ImmutabilityPolicySetting = "Unlocked" ) // PossibleImmutabilityPolicySettingValues returns the possible values for the ImmutabilityPolicySetting const type. func PossibleImmutabilityPolicySettingValues() []ImmutabilityPolicySetting { return []ImmutabilityPolicySetting{ - ImmutabilityPolicySettingUnlocked, ImmutabilityPolicySettingLocked, + ImmutabilityPolicySettingUnlocked, } } type LeaseDurationType string const ( - LeaseDurationTypeInfinite LeaseDurationType = "infinite" LeaseDurationTypeFixed LeaseDurationType = "fixed" + LeaseDurationTypeInfinite LeaseDurationType = "infinite" ) // PossibleLeaseDurationTypeValues returns the possible values for the LeaseDurationType const type. func PossibleLeaseDurationTypeValues() []LeaseDurationType { return []LeaseDurationType{ - LeaseDurationTypeInfinite, LeaseDurationTypeFixed, + LeaseDurationTypeInfinite, } } @@ -272,20 +306,20 @@ type LeaseStateType string const ( LeaseStateTypeAvailable LeaseStateType = "available" - LeaseStateTypeLeased LeaseStateType = "leased" - LeaseStateTypeExpired LeaseStateType = "expired" LeaseStateTypeBreaking LeaseStateType = "breaking" LeaseStateTypeBroken LeaseStateType = "broken" + LeaseStateTypeExpired LeaseStateType = "expired" + LeaseStateTypeLeased LeaseStateType = "leased" ) // PossibleLeaseStateTypeValues returns the possible values for the LeaseStateType const type. func PossibleLeaseStateTypeValues() []LeaseStateType { return []LeaseStateType{ LeaseStateTypeAvailable, - LeaseStateTypeLeased, - LeaseStateTypeExpired, LeaseStateTypeBreaking, LeaseStateTypeBroken, + LeaseStateTypeExpired, + LeaseStateTypeLeased, } } @@ -309,14 +343,14 @@ type ListBlobsIncludeItem string const ( ListBlobsIncludeItemCopy ListBlobsIncludeItem = "copy" ListBlobsIncludeItemDeleted ListBlobsIncludeItem = "deleted" - ListBlobsIncludeItemMetadata ListBlobsIncludeItem = "metadata" - ListBlobsIncludeItemSnapshots ListBlobsIncludeItem = "snapshots" - ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItem = "uncommittedblobs" - ListBlobsIncludeItemVersions ListBlobsIncludeItem = "versions" - ListBlobsIncludeItemTags ListBlobsIncludeItem = "tags" + ListBlobsIncludeItemDeletedwithversions ListBlobsIncludeItem = "deletedwithversions" ListBlobsIncludeItemImmutabilitypolicy ListBlobsIncludeItem = "immutabilitypolicy" ListBlobsIncludeItemLegalhold ListBlobsIncludeItem = "legalhold" - ListBlobsIncludeItemDeletedwithversions ListBlobsIncludeItem = "deletedwithversions" + ListBlobsIncludeItemMetadata ListBlobsIncludeItem = "metadata" + ListBlobsIncludeItemSnapshots ListBlobsIncludeItem = "snapshots" + ListBlobsIncludeItemTags ListBlobsIncludeItem = "tags" + ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItem = "uncommittedblobs" + ListBlobsIncludeItemVersions ListBlobsIncludeItem = "versions" ) // PossibleListBlobsIncludeItemValues returns the possible values for the ListBlobsIncludeItem const type. 
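Apart from the values added here (AccessTierCold, the rehydrate-pending-to-cold archive status, and the new BlobCopySourceTags and FilterBlobsIncludeItem enumerations), the existing constant blocks and their Possible*Values slices are only re-sorted alphabetically, which changes no behaviour. The snippet below copies one of the enumerations verbatim to show how the Possible*Values helpers are meant to be consumed, so that callers that range over them pick up additions such as the new cold rehydrate status automatically:

package main

import "fmt"

// Copied from the generated constants above; the real definitions live in the
// vendored internal/generated package.
type ArchiveStatus string

const (
	ArchiveStatusRehydratePendingToCold ArchiveStatus = "rehydrate-pending-to-cold"
	ArchiveStatusRehydratePendingToCool ArchiveStatus = "rehydrate-pending-to-cool"
	ArchiveStatusRehydratePendingToHot  ArchiveStatus = "rehydrate-pending-to-hot"
)

// PossibleArchiveStatusValues mirrors the generated helper of the same name.
func PossibleArchiveStatusValues() []ArchiveStatus {
	return []ArchiveStatus{
		ArchiveStatusRehydratePendingToCold,
		ArchiveStatusRehydratePendingToCool,
		ArchiveStatusRehydratePendingToHot,
	}
}

func main() {
	// Enumerating the supported values rather than hard-coding them means the
	// newly added "rehydrate-pending-to-cold" status is handled automatically.
	for _, v := range PossibleArchiveStatusValues() {
		fmt.Println(v)
	}
}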
@@ -324,30 +358,30 @@ func PossibleListBlobsIncludeItemValues() []ListBlobsIncludeItem { return []ListBlobsIncludeItem{ ListBlobsIncludeItemCopy, ListBlobsIncludeItemDeleted, - ListBlobsIncludeItemMetadata, - ListBlobsIncludeItemSnapshots, - ListBlobsIncludeItemUncommittedblobs, - ListBlobsIncludeItemVersions, - ListBlobsIncludeItemTags, + ListBlobsIncludeItemDeletedwithversions, ListBlobsIncludeItemImmutabilitypolicy, ListBlobsIncludeItemLegalhold, - ListBlobsIncludeItemDeletedwithversions, + ListBlobsIncludeItemMetadata, + ListBlobsIncludeItemSnapshots, + ListBlobsIncludeItemTags, + ListBlobsIncludeItemUncommittedblobs, + ListBlobsIncludeItemVersions, } } type ListContainersIncludeType string const ( - ListContainersIncludeTypeMetadata ListContainersIncludeType = "metadata" ListContainersIncludeTypeDeleted ListContainersIncludeType = "deleted" + ListContainersIncludeTypeMetadata ListContainersIncludeType = "metadata" ListContainersIncludeTypeSystem ListContainersIncludeType = "system" ) // PossibleListContainersIncludeTypeValues returns the possible values for the ListContainersIncludeType const type. func PossibleListContainersIncludeTypeValues() []ListContainersIncludeType { return []ListContainersIncludeType{ - ListContainersIncludeTypeMetadata, ListContainersIncludeTypeDeleted, + ListContainersIncludeTypeMetadata, ListContainersIncludeTypeSystem, } } @@ -404,18 +438,18 @@ func PossiblePublicAccessTypeValues() []PublicAccessType { type QueryFormatType string const ( + QueryFormatTypeArrow QueryFormatType = "arrow" QueryFormatTypeDelimited QueryFormatType = "delimited" QueryFormatTypeJSON QueryFormatType = "json" - QueryFormatTypeArrow QueryFormatType = "arrow" QueryFormatTypeParquet QueryFormatType = "parquet" ) // PossibleQueryFormatTypeValues returns the possible values for the QueryFormatType const type. func PossibleQueryFormatTypeValues() []QueryFormatType { return []QueryFormatType{ + QueryFormatTypeArrow, QueryFormatTypeDelimited, QueryFormatTypeJSON, - QueryFormatTypeArrow, QueryFormatTypeParquet, } } @@ -440,38 +474,38 @@ func PossibleRehydratePriorityValues() []RehydratePriority { type SKUName string const ( - SKUNameStandardLRS SKUName = "Standard_LRS" + SKUNamePremiumLRS SKUName = "Premium_LRS" SKUNameStandardGRS SKUName = "Standard_GRS" + SKUNameStandardLRS SKUName = "Standard_LRS" SKUNameStandardRAGRS SKUName = "Standard_RAGRS" SKUNameStandardZRS SKUName = "Standard_ZRS" - SKUNamePremiumLRS SKUName = "Premium_LRS" ) // PossibleSKUNameValues returns the possible values for the SKUName const type. func PossibleSKUNameValues() []SKUName { return []SKUName{ - SKUNameStandardLRS, + SKUNamePremiumLRS, SKUNameStandardGRS, + SKUNameStandardLRS, SKUNameStandardRAGRS, SKUNameStandardZRS, - SKUNamePremiumLRS, } } type SequenceNumberActionType string const ( + SequenceNumberActionTypeIncrement SequenceNumberActionType = "increment" SequenceNumberActionTypeMax SequenceNumberActionType = "max" SequenceNumberActionTypeUpdate SequenceNumberActionType = "update" - SequenceNumberActionTypeIncrement SequenceNumberActionType = "increment" ) // PossibleSequenceNumberActionTypeValues returns the possible values for the SequenceNumberActionType const type. 
func PossibleSequenceNumberActionTypeValues() []SequenceNumberActionType { return []SequenceNumberActionType{ + SequenceNumberActionTypeIncrement, SequenceNumberActionTypeMax, SequenceNumberActionTypeUpdate, - SequenceNumberActionTypeIncrement, } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go index 5fcac5135..8d325a3a5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go @@ -25,28 +25,17 @@ import ( ) // ContainerClient contains the methods for the Container group. -// Don't use this type directly, use NewContainerClient() instead. +// Don't use this type directly, use a constructor function instead. type ContainerClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewContainerClient creates a new instance of ContainerClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// - pl - the pipeline used for sending requests and handling responses. -func NewContainerClient(endpoint string, pl runtime.Pipeline) *ContainerClient { - client := &ContainerClient{ - endpoint: endpoint, - pl: pl, - } - return client } // AcquireLease - [Update] establishes and manages a lock on a container for delete operations. The lock duration can be 15 // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite // lease can be between 15 and 60 seconds. A lease duration cannot be changed using // renew or change. @@ -57,7 +46,7 @@ func (client *ContainerClient) AcquireLease(ctx context.Context, duration int32, if err != nil { return ContainerClientAcquireLeaseResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientAcquireLeaseResponse{}, err } @@ -91,7 +80,7 @@ func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, du if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -138,7 +127,7 @@ func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) ( // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
func (client *ContainerClient) BreakLease(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientBreakLeaseResponse, error) { @@ -146,7 +135,7 @@ func (client *ContainerClient) BreakLease(ctx context.Context, options *Containe if err != nil { return ContainerClientBreakLeaseResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientBreakLeaseResponse{}, err } @@ -179,7 +168,7 @@ func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, opti if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -231,7 +220,7 @@ func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (Co // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - leaseID - Specifies the current lease ID on the resource. // - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed // lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID @@ -243,7 +232,7 @@ func (client *ContainerClient) ChangeLease(ctx context.Context, leaseID string, if err != nil { return ContainerClientChangeLeaseResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientChangeLeaseResponse{}, err } @@ -275,7 +264,7 @@ func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, lea if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -322,7 +311,7 @@ func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (C // fails // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method. // - ContainerCPKScopeInfo - ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method. 
func (client *ContainerClient) Create(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (ContainerClientCreateResponse, error) { @@ -330,7 +319,7 @@ func (client *ContainerClient) Create(ctx context.Context, options *ContainerCli if err != nil { return ContainerClientCreateResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientCreateResponse{}, err } @@ -362,7 +351,7 @@ func (client *ContainerClient) createCreateRequest(ctx context.Context, options if options != nil && options.Access != nil { req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -412,7 +401,7 @@ func (client *ContainerClient) createHandleResponse(resp *http.Response) (Contai // deleted during garbage collection // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. @@ -421,7 +410,7 @@ func (client *ContainerClient) Delete(ctx context.Context, options *ContainerCli if err != nil { return ContainerClientDeleteResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientDeleteResponse{}, err } @@ -452,7 +441,7 @@ func (client *ContainerClient) deleteCreateRequest(ctx context.Context, options if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -482,11 +471,89 @@ func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (Contai return result, nil } +// FilterBlobs - The Filter Blobs operation enables callers to list blobs in a container whose tags match a given search expression. +// Filter blobs searches within the given container. +// If the operation fails it returns an *azcore.ResponseError type. +// +// Generated from API version 2023-08-03 +// - where - Filters the results to return only to return only blobs whose tags match the specified expression. +// - options - ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method. 
+func (client *ContainerClient) FilterBlobs(ctx context.Context, where string, options *ContainerClientFilterBlobsOptions) (ContainerClientFilterBlobsResponse, error) { + req, err := client.filterBlobsCreateRequest(ctx, where, options) + if err != nil { + return ContainerClientFilterBlobsResponse{}, err + } + resp, err := client.internal.Pipeline().Do(req) + if err != nil { + return ContainerClientFilterBlobsResponse{}, err + } + if !runtime.HasStatusCode(resp, http.StatusOK) { + return ContainerClientFilterBlobsResponse{}, runtime.NewResponseError(resp) + } + return client.filterBlobsHandleResponse(resp) +} + +// filterBlobsCreateRequest creates the FilterBlobs request. +func (client *ContainerClient) filterBlobsCreateRequest(ctx context.Context, where string, options *ContainerClientFilterBlobsOptions) (*policy.Request, error) { + req, err := runtime.NewRequest(ctx, http.MethodGet, client.endpoint) + if err != nil { + return nil, err + } + reqQP := req.Raw().URL.Query() + reqQP.Set("restype", "container") + reqQP.Set("comp", "blobs") + if options != nil && options.Timeout != nil { + reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) + } + reqQP.Set("where", where) + if options != nil && options.Marker != nil { + reqQP.Set("marker", *options.Marker) + } + if options != nil && options.Maxresults != nil { + reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) + } + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } + req.Raw().URL.RawQuery = reqQP.Encode() + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} + if options != nil && options.RequestID != nil { + req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} + } + req.Raw().Header["Accept"] = []string{"application/xml"} + return req, nil +} + +// filterBlobsHandleResponse handles the FilterBlobs response. +func (client *ContainerClient) filterBlobsHandleResponse(resp *http.Response) (ContainerClientFilterBlobsResponse, error) { + result := ContainerClientFilterBlobsResponse{} + if val := resp.Header.Get("x-ms-client-request-id"); val != "" { + result.ClientRequestID = &val + } + if val := resp.Header.Get("x-ms-request-id"); val != "" { + result.RequestID = &val + } + if val := resp.Header.Get("x-ms-version"); val != "" { + result.Version = &val + } + if val := resp.Header.Get("Date"); val != "" { + date, err := time.Parse(time.RFC1123, val) + if err != nil { + return ContainerClientFilterBlobsResponse{}, err + } + result.Date = &date + } + if err := runtime.UnmarshalAsXML(resp, &result.FilterBlobSegment); err != nil { + return ContainerClientFilterBlobsResponse{}, err + } + return result, nil +} + // GetAccessPolicy - gets the permissions for the specified container. The permissions indicate whether container data may // be accessed publicly. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy // method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
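The new filterBlobsCreateRequest above builds the include= query parameter from the Include slice with a Sprint/Trim/Fields/Join chain. The stand-alone program below reproduces that exact chain (the type and constants are copied from the generated constants file) to show the value it produces:

package main

import (
	"fmt"
	"strings"
)

// Copied from the generated constants file added in this change.
type FilterBlobsIncludeItem string

const (
	FilterBlobsIncludeItemNone     FilterBlobsIncludeItem = "none"
	FilterBlobsIncludeItemVersions FilterBlobsIncludeItem = "versions"
)

func main() {
	include := []FilterBlobsIncludeItem{FilterBlobsIncludeItemNone, FilterBlobsIncludeItemVersions}
	// fmt.Sprint renders the slice as "[none versions]"; trimming the brackets,
	// splitting on whitespace and re-joining with commas yields the query value.
	v := strings.Join(strings.Fields(strings.Trim(fmt.Sprint(include), "[]")), ",")
	fmt.Println(v) // prints: none,versions
}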
@@ -495,7 +562,7 @@ func (client *ContainerClient) GetAccessPolicy(ctx context.Context, options *Con if err != nil { return ContainerClientGetAccessPolicyResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientGetAccessPolicyResponse{}, err } @@ -521,7 +588,7 @@ func (client *ContainerClient) getAccessPolicyCreateRequest(ctx context.Context, if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -570,7 +637,7 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo // method. func (client *ContainerClient) GetAccountInfo(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (ContainerClientGetAccountInfoResponse, error) { @@ -578,7 +645,7 @@ func (client *ContainerClient) GetAccountInfo(ctx context.Context, options *Cont if err != nil { return ContainerClientGetAccountInfoResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientGetAccountInfoResponse{}, err } @@ -598,7 +665,7 @@ func (client *ContainerClient) getAccountInfoCreateRequest(ctx context.Context, reqQP.Set("restype", "account") reqQP.Set("comp", "properties") req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -635,7 +702,7 @@ func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response) // does not include the container's list of blobs // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
func (client *ContainerClient) GetProperties(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetPropertiesResponse, error) { @@ -643,7 +710,7 @@ func (client *ContainerClient) GetProperties(ctx context.Context, options *Conta if err != nil { return ContainerClientGetPropertiesResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientGetPropertiesResponse{}, err } @@ -668,7 +735,7 @@ func (client *ContainerClient) getPropertiesCreateRequest(ctx context.Context, o if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil { req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -761,7 +828,7 @@ func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response) // NewListBlobFlatSegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager // method. // @@ -790,7 +857,7 @@ func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -828,7 +895,7 @@ func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Resp // NewListBlobHierarchySegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - delimiter - When the request includes this parameter, the operation returns a BlobPrefix element in the response body that // acts as a placeholder for all blobs whose names begin with the same substring up to the // appearance of the delimiter character. The delimiter may be a single character or a string. 
@@ -850,7 +917,7 @@ func (client *ContainerClient) NewListBlobHierarchySegmentPager(delimiter string if err != nil { return ContainerClientListBlobHierarchySegmentResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientListBlobHierarchySegmentResponse{}, err } @@ -888,7 +955,7 @@ func (client *ContainerClient) ListBlobHierarchySegmentCreateRequest(ctx context reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -928,7 +995,7 @@ func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - leaseID - Specifies the current lease ID on the resource. // - options - ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. @@ -937,7 +1004,7 @@ func (client *ContainerClient) ReleaseLease(ctx context.Context, leaseID string, if err != nil { return ContainerClientReleaseLeaseResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientReleaseLeaseResponse{}, err } @@ -968,7 +1035,7 @@ func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, le if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1011,7 +1078,7 @@ func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) ( // Rename - Renames an existing container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - sourceContainerName - Required. Specifies the name of the container to rename. // - options - ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method. 
func (client *ContainerClient) Rename(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (ContainerClientRenameResponse, error) { @@ -1019,7 +1086,7 @@ func (client *ContainerClient) Rename(ctx context.Context, sourceContainerName s if err != nil { return ContainerClientRenameResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientRenameResponse{}, err } @@ -1042,7 +1109,7 @@ func (client *ContainerClient) renameCreateRequest(ctx context.Context, sourceCo reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1080,7 +1147,7 @@ func (client *ContainerClient) renameHandleResponse(resp *http.Response) (Contai // to 60 seconds, or can be infinite // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - leaseID - Specifies the current lease ID on the resource. // - options - ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. @@ -1089,7 +1156,7 @@ func (client *ContainerClient) RenewLease(ctx context.Context, leaseID string, o if err != nil { return ContainerClientRenewLeaseResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientRenewLeaseResponse{}, err } @@ -1120,7 +1187,7 @@ func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leas if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1166,14 +1233,14 @@ func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (Co // Restore - Restores a previously-deleted container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method. 
func (client *ContainerClient) Restore(ctx context.Context, options *ContainerClientRestoreOptions) (ContainerClientRestoreResponse, error) { req, err := client.restoreCreateRequest(ctx, options) if err != nil { return ContainerClientRestoreResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientRestoreResponse{}, err } @@ -1196,7 +1263,7 @@ func (client *ContainerClient) restoreCreateRequest(ctx context.Context, options reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1236,7 +1303,7 @@ func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (Conta // may be accessed publicly. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - containerACL - the acls for the container // - options - ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy // method. @@ -1247,7 +1314,7 @@ func (client *ContainerClient) SetAccessPolicy(ctx context.Context, containerACL if err != nil { return ContainerClientSetAccessPolicyResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientSetAccessPolicyResponse{}, err } @@ -1282,7 +1349,7 @@ func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context, if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil { req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1291,7 +1358,10 @@ func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context, XMLName xml.Name `xml:"SignedIdentifiers"` ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"` } - return req, runtime.MarshalAsXML(req, wrapper{ContainerACL: &containerACL}) + if err := runtime.MarshalAsXML(req, wrapper{ContainerACL: &containerACL}); err != nil { + return nil, err + } + return req, nil } // setAccessPolicyHandleResponse handles the SetAccessPolicy response. @@ -1329,7 +1399,7 @@ func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response // SetMetadata - operation sets one or more user-defined name-value pairs for the specified container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. // - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method. 
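setAccessPolicyCreateRequest above wraps the caller's ACL slice in an anonymous struct whose XMLName is SignedIdentifiers before marshalling it, and now surfaces a MarshalAsXML failure as a nil request. The runnable sketch below shows the XML envelope that wrapper produces, using a cut-down SignedIdentifier stand-in instead of the generated model:

package main

import (
	"encoding/xml"
	"fmt"
)

// Cut-down stand-in for the generated SignedIdentifier model.
type SignedIdentifier struct {
	ID *string `xml:"Id"`
}

func main() {
	id := "policy-1"
	acl := []*SignedIdentifier{{ID: &id}}
	// Same wrapper shape as setAccessPolicyCreateRequest: the ACL slice is
	// nested under a SignedIdentifiers root element.
	wrapper := struct {
		XMLName      xml.Name             `xml:"SignedIdentifiers"`
		ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"`
	}{ContainerACL: &acl}
	out, err := xml.Marshal(wrapper)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// <SignedIdentifiers><SignedIdentifier><Id>policy-1</Id></SignedIdentifier></SignedIdentifiers>
}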
@@ -1338,7 +1408,7 @@ func (client *ContainerClient) SetMetadata(ctx context.Context, options *Contain if err != nil { return ContainerClientSetMetadataResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientSetMetadataResponse{}, err } @@ -1374,7 +1444,7 @@ func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, opt if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil { req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -1417,7 +1487,7 @@ func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (C // SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - contentLength - The length of the request. // - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header // value: multipart/mixed; boundary=batch_ @@ -1428,7 +1498,7 @@ func (client *ContainerClient) SubmitBatch(ctx context.Context, contentLength in if err != nil { return ContainerClientSubmitBatchResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return ContainerClientSubmitBatchResponse{}, err } @@ -1454,12 +1524,15 @@ func (client *ContainerClient) submitBatchCreateRequest(ctx context.Context, con runtime.SkipBodyDownload(req) req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} req.Raw().Header["Content-Type"] = []string{multipartContentType} - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, multipartContentType) + if err := req.SetBody(body, multipartContentType); err != nil { + return nil, err + } + return req, nil } // submitBatchHandleResponse handles the SubmitBatch response. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go index 022807f53..1fed5f630 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go @@ -181,6 +181,8 @@ type BlobClientCopyFromURLOptions struct { BlobTagsString *string // Only Bearer type is supported. Credentials should be a valid OAuth access token to copy source. CopySourceAuthorization *string + // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. + CopySourceTags *BlobCopySourceTags // Specifies the date time when the blobs immutability policy is set to expire. 
ImmutabilityPolicyExpiry *time.Time // Specifies the immutability policy mode to set on the blob. @@ -554,6 +556,14 @@ type BlobItem struct { VersionID *string `xml:"VersionId"` } +type BlobName struct { + // The name of the blob. + Content *string `xml:",chardata"` + + // Indicates if the blob name is encoded. + Encoded *bool `xml:"Encoded,attr"` +} + type BlobPrefix struct { // REQUIRED Name *string `xml:"Name"` @@ -689,6 +699,8 @@ type BlockBlobClientPutBlobFromURLOptions struct { CopySourceAuthorization *string // Optional, default is true. Indicates if properties from the source blob should be copied. CopySourceBlobProperties *bool + // Optional, default 'replace'. Indicates if source tags should be copied or replaced with the tags specified by x-ms-tags. + CopySourceTags *BlobCopySourceTags // Optional. Specifies a user-defined name-value pair associated with the blob. If no name-value pairs are specified, the // operation will copy the metadata from the source blob or file to the destination // blob. If one or more name-value pairs are specified, the destination blob is created with the specified metadata, and metadata @@ -767,6 +779,8 @@ type BlockBlobClientUploadOptions struct { // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] Timeout *int32 + // Specify the transactional crc64 for the body, to be validated by the service. + TransactionalContentCRC64 []byte // Specify the transactional md5 for the body, to be validated by the service. TransactionalContentMD5 []byte } @@ -860,6 +874,30 @@ type ContainerClientDeleteOptions struct { Timeout *int32 } +// ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method. +type ContainerClientFilterBlobsOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []FilterBlobsIncludeItem + // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The + // operation returns the NextMarker value within the response body if the listing + // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used + // as the value for the marker parameter in a subsequent call to request the next + // page of list items. The marker value is opaque to the client. + Marker *string + // Specifies the maximum number of containers to return. If the request does not specify maxresults, or specifies a value + // greater than 5000, the server will return up to 5000 items. Note that if the + // listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder + // of the results. For this reason, it is possible that the service will + // return fewer results than specified by maxresults, or than the default of 5000. + Maxresults *int32 + // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage + // analytics logging is enabled. + RequestID *string + // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations. 
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations] + Timeout *int32 +} + // ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy method. type ContainerClientGetAccessPolicyOptions struct { // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage @@ -1140,10 +1178,12 @@ type FilterBlobItem struct { ContainerName *string `xml:"ContainerName"` // REQUIRED - Name *string `xml:"Name"` + Name *string `xml:"Name"` + IsCurrentVersion *bool `xml:"IsCurrentVersion"` // Blob tags - Tags *BlobTags `xml:"Tags"` + Tags *BlobTags `xml:"Tags"` + VersionID *string `xml:"VersionId"` } // FilterBlobSegment - The result of a Filter Blobs API call @@ -1533,6 +1573,8 @@ type SequenceNumberAccessConditions struct { // ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. type ServiceClientFilterBlobsOptions struct { + // Include this parameter to specify one or more datasets to include in the response. + Include []FilterBlobsIncludeItem // A string value that identifies the portion of the list of containers to be returned with the next listing operation. The // operation returns the NextMarker value within the response body if the listing // operation did not return all containers remaining to be listed with the current page. The NextMarker value can be used @@ -1674,7 +1716,7 @@ type StaticWebsite struct { } type StorageError struct { - Message *string `json:"Message,omitempty"` + Message *string } // StorageServiceProperties - Storage Service Properties. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go index e5b6cda2b..dc5dba103 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go @@ -101,24 +101,6 @@ func (b BlobHierarchyListSegment) MarshalXML(enc *xml.Encoder, start xml.StartEl return enc.EncodeElement(aux, start) } -// UnmarshalXML implements the xml.Unmarshaller interface for type BlobItem. -func (b *BlobItem) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { - type alias BlobItem - aux := &struct { - *alias - Metadata additionalProperties `xml:"Metadata"` - OrMetadata additionalProperties `xml:"OrMetadata"` - }{ - alias: (*alias)(b), - } - if err := dec.DecodeElement(aux, &start); err != nil { - return err - } - b.Metadata = (map[string]*string)(aux.Metadata) - b.OrMetadata = (map[string]*string)(aux.OrMetadata) - return nil -} - // MarshalXML implements the xml.Marshaller interface for type BlobProperties. 
func (b BlobProperties) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { type alias BlobProperties @@ -470,6 +452,16 @@ func populate(m map[string]any, k string, v any) { } } +func populateAny(m map[string]any, k string, v any) { + if v == nil { + return + } else if azcore.IsNullValue(v) { + m[k] = nil + } else { + m[k] = v + } +} + func unpopulate(data json.RawMessage, fn string, v any) error { if data == nil { return nil diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go index b209e99f0..b41644c99 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go @@ -22,27 +22,16 @@ import ( ) // PageBlobClient contains the methods for the PageBlob group. -// Don't use this type directly, use NewPageBlobClient() instead. +// Don't use this type directly, use a constructor function instead. type PageBlobClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewPageBlobClient creates a new instance of PageBlobClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// - pl - the pipeline used for sending requests and handling responses. -func NewPageBlobClient(endpoint string, pl runtime.Pipeline) *PageBlobClient { - client := &PageBlobClient{ - endpoint: endpoint, - pl: pl, - } - return client } // ClearPages - The Clear Pages operation clears a set of pages from a page blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - contentLength - The length of the request. // - options - PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. @@ -56,7 +45,7 @@ func (client *PageBlobClient) ClearPages(ctx context.Context, contentLength int6 if err != nil { return PageBlobClientClearPagesResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientClearPagesResponse{}, err } @@ -122,7 +111,7 @@ func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, conte if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -190,7 +179,7 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag // 2016-05-31. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies // a page blob snapshot. The value should be URL-encoded as it would appear in a request // URI. 
The source blob must either be public or must be authenticated via a shared access signature. @@ -202,7 +191,7 @@ func (client *PageBlobClient) CopyIncremental(ctx context.Context, copySource st if err != nil { return PageBlobClientCopyIncrementalResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientCopyIncrementalResponse{}, err } @@ -240,7 +229,7 @@ func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context, req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } req.Raw().Header["x-ms-copy-source"] = []string{copySource} - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -289,7 +278,7 @@ func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response) // Create - The Create operation creates a new page blob. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - contentLength - The length of the request. // - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned // to a 512-byte boundary. @@ -304,7 +293,7 @@ func (client *PageBlobClient) Create(ctx context.Context, contentLength int64, b if err != nil { return PageBlobClientCreateResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientCreateResponse{}, err } @@ -389,7 +378,7 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe if options != nil && options.BlobSequenceNumber != nil { req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -467,7 +456,7 @@ func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlo // NewGetPageRangesPager - The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot // of a page blob // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager // method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. 
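The BlobName model introduced in zz_models.go earlier in this patch pairs a ",chardata" field for the element text with an Encoded attribute, so listings can report percent-encoded blob names. A small stdlib sketch of how such a shape decodes; the element and value below are made up for illustration:

package example

import (
	"encoding/xml"
	"fmt"
)

type blobName struct {
	Content string `xml:",chardata"`    // the element text: the (possibly encoded) blob name
	Encoded bool   `xml:"Encoded,attr"` // whether Content is percent-encoded
}

func decodeName() {
	var n blobName
	if err := xml.Unmarshal([]byte(`<Name Encoded="true">dir%2Ffile.txt</Name>`), &n); err != nil {
		panic(err)
	}
	fmt.Println(n.Content, n.Encoded) // dir%2Ffile.txt true
}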
@@ -488,7 +477,7 @@ func (client *PageBlobClient) NewGetPageRangesPager(options *PageBlobClientGetPa if err != nil { return PageBlobClientGetPageRangesResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientGetPageRangesResponse{}, err } @@ -542,7 +531,7 @@ func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, op if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -595,7 +584,7 @@ func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) ( // NewGetPageRangesDiffPager - The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that // were changed between target blob and previous snapshot. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager // method. // - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method. @@ -616,7 +605,7 @@ func (client *PageBlobClient) NewGetPageRangesDiffPager(options *PageBlobClientG if err != nil { return PageBlobClientGetPageRangesDiffResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientGetPageRangesDiffResponse{}, err } @@ -676,7 +665,7 @@ func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -729,7 +718,7 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons // Resize - Resize the Blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned // to a 512-byte boundary. // - options - PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method. 
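NewGetPageRangesPager above only rewires the pager's fetcher through the new internal pipeline; how the pager is consumed does not change. A sketch of the usual azcore pager loop, assuming a *pageblob.Client named client already exists and that the response embeds the generated PageList (so PageRange is promoted):

package example

import (
	"context"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob"
)

func listPageRanges(ctx context.Context, client *pageblob.Client) error {
	pager := client.NewGetPageRangesPager(nil) // nil options: list all valid ranges
	for pager.More() {
		page, err := pager.NextPage(ctx)
		if err != nil {
			return err
		}
		for _, r := range page.PageRange {
			fmt.Println(*r.Start, *r.End)
		}
	}
	return nil
}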
@@ -742,7 +731,7 @@ func (client *PageBlobClient) Resize(ctx context.Context, blobContentLength int6 if err != nil { return PageBlobClientResizeResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientResizeResponse{}, err } @@ -795,7 +784,7 @@ func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobConte req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)} - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -845,7 +834,7 @@ func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo // UpdateSequenceNumber - Update the sequence number of the blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. This property applies to // page blobs only. This property indicates how the service should modify the blob's sequence number // - options - PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber @@ -857,7 +846,7 @@ func (client *PageBlobClient) UpdateSequenceNumber(ctx context.Context, sequence if err != nil { return PageBlobClientUpdateSequenceNumberResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientUpdateSequenceNumberResponse{}, err } @@ -901,7 +890,7 @@ func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Cont if options != nil && options.BlobSequenceNumber != nil { req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -951,7 +940,7 @@ func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp // UploadPages - The Upload Pages operation writes a range of pages to a page blob // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - contentLength - The length of the request. // - body - Initial data // - options - PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method. 
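BlockBlobClientUploadOptions earlier in this patch gains a TransactionalContentCRC64 field alongside the existing MD5 one. A sketch of computing that checksum with hash/crc64, reusing the CRC64 polynomial the module's internal/shared package already defines (visible as context later in this patch); the byte order below is an assumption, since this hunk does not show how the service expects the checksum encoded:

package example

import (
	"encoding/binary"
	"hash/crc64"
)

// Same polynomial as crc64Polynomial in internal/shared/shared.go.
const storageCRC64Polynomial uint64 = 0x9A6C9329AC4BC9B5

var storageCRC64Table = crc64.MakeTable(storageCRC64Polynomial)

// transactionalCRC64 returns the checksum bytes to place in TransactionalContentCRC64.
func transactionalCRC64(body []byte) []byte {
	sum := crc64.Checksum(body, storageCRC64Table)
	buf := make([]byte, 8)
	binary.LittleEndian.PutUint64(buf, sum) // assumed byte order; verify against the service docs
	return buf
}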
@@ -966,7 +955,7 @@ func (client *PageBlobClient) UploadPages(ctx context.Context, contentLength int if err != nil { return PageBlobClientUploadPagesResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientUploadPagesResponse{}, err } @@ -1038,12 +1027,15 @@ func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, cont if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil { req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, "application/octet-stream") + if err := req.SetBody(body, "application/octet-stream"); err != nil { + return nil, err + } + return req, nil } // uploadPagesHandleResponse handles the UploadPages response. @@ -1116,7 +1108,7 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa // a URL // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - sourceURL - Specify a URL to the copy source. // - sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header // and x-ms-range/Range destination range header. @@ -1138,7 +1130,7 @@ func (client *PageBlobClient) UploadPagesFromURL(ctx context.Context, sourceURL if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return PageBlobClientUploadPagesFromURLResponse{}, err } @@ -1222,7 +1214,7 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil { req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)} } - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go index 386c943e4..b52664c93 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go @@ -266,6 +266,9 @@ type BlobClientCopyFromURLResponse struct { // ETag contains the information returned from the ETag header response. ETag *azcore.ETag + // EncryptionScope contains the information returned from the x-ms-encryption-scope header response. + EncryptionScope *string + // LastModified contains the information returned from the Last-Modified header response. LastModified *time.Time @@ -407,6 +410,9 @@ type BlobClientDownloadResponse struct { // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response. 
CopyStatusDescription *string + // CreationTime contains the information returned from the x-ms-creation-time header response. + CreationTime *time.Time + // Date contains the information returned from the Date header response. Date *time.Time @@ -1310,6 +1316,22 @@ type ContainerClientDeleteResponse struct { Version *string } +// ContainerClientFilterBlobsResponse contains the response from method ContainerClient.FilterBlobs. +type ContainerClientFilterBlobsResponse struct { + FilterBlobSegment + // ClientRequestID contains the information returned from the x-ms-client-request-id header response. + ClientRequestID *string `xml:"ClientRequestID"` + + // Date contains the information returned from the Date header response. + Date *time.Time `xml:"Date"` + + // RequestID contains the information returned from the x-ms-request-id header response. + RequestID *string `xml:"RequestID"` + + // Version contains the information returned from the x-ms-version header response. + Version *string `xml:"Version"` +} + // ContainerClientGetAccessPolicyResponse contains the response from method ContainerClient.GetAccessPolicy. type ContainerClientGetAccessPolicyResponse struct { // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go index b700211c5..faeefdc53 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go @@ -12,6 +12,7 @@ package generated import ( "context" "fmt" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "io" @@ -22,21 +23,10 @@ import ( ) // ServiceClient contains the methods for the Service group. -// Don't use this type directly, use NewServiceClient() instead. +// Don't use this type directly, use a constructor function instead. type ServiceClient struct { + internal *azcore.Client endpoint string - pl runtime.Pipeline -} - -// NewServiceClient creates a new instance of ServiceClient with the specified values. -// - endpoint - The URL of the service account, container, or blob that is the target of the desired operation. -// - pl - the pipeline used for sending requests and handling responses. -func NewServiceClient(endpoint string, pl runtime.Pipeline) *ServiceClient { - client := &ServiceClient{ - endpoint: endpoint, - pl: pl, - } - return client } // FilterBlobs - The Filter Blobs operation enables callers to list blobs across all containers whose tags match a given search @@ -44,7 +34,7 @@ func NewServiceClient(endpoint string, pl runtime.Pipeline) *ServiceClient { // be scoped within the expression to a single container. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - where - Filters the results to return only to return only blobs whose tags match the specified expression. // - options - ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method. 
func (client *ServiceClient) FilterBlobs(ctx context.Context, where string, options *ServiceClientFilterBlobsOptions) (ServiceClientFilterBlobsResponse, error) { @@ -52,7 +42,7 @@ func (client *ServiceClient) FilterBlobs(ctx context.Context, where string, opti if err != nil { return ServiceClientFilterBlobsResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientFilterBlobsResponse{}, err } @@ -80,8 +70,11 @@ func (client *ServiceClient) filterBlobsCreateRequest(ctx context.Context, where if options != nil && options.Maxresults != nil { reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10)) } + if options != nil && options.Include != nil { + reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ",")) + } req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1) - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -117,14 +110,14 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (Ser // GetAccountInfo - Returns the sku name and account kind // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method. func (client *ServiceClient) GetAccountInfo(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (ServiceClientGetAccountInfoResponse, error) { req, err := client.getAccountInfoCreateRequest(ctx, options) if err != nil { return ServiceClientGetAccountInfoResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetAccountInfoResponse{}, err } @@ -144,7 +137,7 @@ func (client *ServiceClient) getAccountInfoCreateRequest(ctx context.Context, op reqQP.Set("restype", "account") reqQP.Set("comp", "properties") req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} req.Raw().Header["Accept"] = []string{"application/xml"} return req, nil } @@ -188,14 +181,14 @@ func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) ( // CORS (Cross-Origin Resource Sharing) rules. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method. 
func (client *ServiceClient) GetProperties(ctx context.Context, options *ServiceClientGetPropertiesOptions) (ServiceClientGetPropertiesResponse, error) { req, err := client.getPropertiesCreateRequest(ctx, options) if err != nil { return ServiceClientGetPropertiesResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetPropertiesResponse{}, err } @@ -218,7 +211,7 @@ func (client *ServiceClient) getPropertiesCreateRequest(ctx context.Context, opt reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -248,14 +241,14 @@ func (client *ServiceClient) getPropertiesHandleResponse(resp *http.Response) (S // location endpoint when read-access geo-redundant replication is enabled for the storage account. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method. func (client *ServiceClient) GetStatistics(ctx context.Context, options *ServiceClientGetStatisticsOptions) (ServiceClientGetStatisticsResponse, error) { req, err := client.getStatisticsCreateRequest(ctx, options) if err != nil { return ServiceClientGetStatisticsResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetStatisticsResponse{}, err } @@ -278,7 +271,7 @@ func (client *ServiceClient) getStatisticsCreateRequest(ctx context.Context, opt reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -315,7 +308,7 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S // bearer token authentication. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - keyInfo - Key information // - options - ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey // method. 
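filterBlobsCreateRequest above turns the new Include slice into a comma-separated query value by printing the slice and splitting it back apart. A stdlib sketch of that trick in isolation, using a hypothetical enum-like string type:

package example

import (
	"fmt"
	"strings"
)

type includeItem string

// joinInclude renders []includeItem{"none", "versions"} as "none,versions",
// matching how the generated client builds the include query parameter.
func joinInclude(items []includeItem) string {
	return strings.Join(strings.Fields(strings.Trim(fmt.Sprint(items), "[]")), ",")
}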
@@ -324,7 +317,7 @@ func (client *ServiceClient) GetUserDelegationKey(ctx context.Context, keyInfo K if err != nil { return ServiceClientGetUserDelegationKeyResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientGetUserDelegationKeyResponse{}, err } @@ -347,12 +340,15 @@ func (client *ServiceClient) getUserDelegationKeyCreateRequest(ctx context.Conte reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, runtime.MarshalAsXML(req, keyInfo) + if err := runtime.MarshalAsXML(req, keyInfo); err != nil { + return nil, err + } + return req, nil } // getUserDelegationKeyHandleResponse handles the GetUserDelegationKey response. @@ -383,7 +379,7 @@ func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Respo // NewListContainersSegmentPager - The List Containers Segment operation returns a list of the containers under the specified // account // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - options - ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager // method. // @@ -411,7 +407,7 @@ func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Cont reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } @@ -441,7 +437,7 @@ func (client *ServiceClient) ListContainersSegmentHandleResponse(resp *http.Resp // and CORS (Cross-Origin Resource Sharing) rules // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - storageServiceProperties - The StorageService properties. // - options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method. 
func (client *ServiceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (ServiceClientSetPropertiesResponse, error) { @@ -449,7 +445,7 @@ func (client *ServiceClient) SetProperties(ctx context.Context, storageServicePr if err != nil { return ServiceClientSetPropertiesResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientSetPropertiesResponse{}, err } @@ -472,12 +468,15 @@ func (client *ServiceClient) setPropertiesCreateRequest(ctx context.Context, sto reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10)) } req.Raw().URL.RawQuery = reqQP.Encode() - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, runtime.MarshalAsXML(req, storageServiceProperties) + if err := runtime.MarshalAsXML(req, storageServiceProperties); err != nil { + return nil, err + } + return req, nil } // setPropertiesHandleResponse handles the SetProperties response. @@ -498,7 +497,7 @@ func (client *ServiceClient) setPropertiesHandleResponse(resp *http.Response) (S // SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request. // If the operation fails it returns an *azcore.ResponseError type. // -// Generated from API version 2020-10-02 +// Generated from API version 2023-08-03 // - contentLength - The length of the request. // - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header // value: multipart/mixed; boundary=batch_ @@ -509,7 +508,7 @@ func (client *ServiceClient) SubmitBatch(ctx context.Context, contentLength int6 if err != nil { return ServiceClientSubmitBatchResponse{}, err } - resp, err := client.pl.Do(req) + resp, err := client.internal.Pipeline().Do(req) if err != nil { return ServiceClientSubmitBatchResponse{}, err } @@ -534,12 +533,15 @@ func (client *ServiceClient) submitBatchCreateRequest(ctx context.Context, conte runtime.SkipBodyDownload(req) req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)} req.Raw().Header["Content-Type"] = []string{multipartContentType} - req.Raw().Header["x-ms-version"] = []string{"2020-10-02"} + req.Raw().Header["x-ms-version"] = []string{ServiceVersion} if options != nil && options.RequestID != nil { req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID} } req.Raw().Header["Accept"] = []string{"application/xml"} - return req, req.SetBody(body, multipartContentType) + if err := req.SetBody(body, multipartContentType); err != nil { + return nil, err + } + return req, nil } // submitBatchHandleResponse handles the SubmitBatch response. 
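Throughout these generated clients the hardcoded "2020-10-02" service version is replaced by a ServiceVersion constant, so future regenerations only need to touch one definition. The constant itself is declared elsewhere in the generated package and is not part of this hunk; a sketch of the idea, with the value inferred from the "Generated from API version 2023-08-03" comments:

package example

import "net/http"

// ServiceVersion is assumed to be declared once per generated package,
// e.g. const ServiceVersion = "2023-08-03".
const ServiceVersion = "2023-08-03"

func setCommonHeaders(h http.Header) {
	h["x-ms-version"] = []string{ServiceVersion} // every request helper reuses the same constant
	h["Accept"] = []string{"application/xml"}
}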
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go index ec5541bfb..c1b3a3d27 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go @@ -11,10 +11,15 @@ import ( "errors" ) +const ( + DefaultConcurrency = 5 +) + // BatchTransferOptions identifies options used by doBatchTransfer. type BatchTransferOptions struct { TransferSize int64 ChunkSize int64 + NumChunks uint16 Concurrency uint16 Operation func(ctx context.Context, offset int64, chunkSize int64) error OperationName string @@ -28,13 +33,12 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { } if o.Concurrency == 0 { - o.Concurrency = 5 // default concurrency + o.Concurrency = DefaultConcurrency // default concurrency } // Prepare and do parallel operations. - numChunks := uint16(((o.TransferSize - 1) / o.ChunkSize) + 1) operationChannel := make(chan func() error, o.Concurrency) // Create the channel that release 'concurrency' goroutines concurrently - operationResponseChannel := make(chan error, numChunks) // Holds each response + operationResponseChannel := make(chan error, o.NumChunks) // Holds each response ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -50,10 +54,10 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { } // Add each chunk's operation to the channel. - for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { + for chunkNum := uint16(0); chunkNum < o.NumChunks; chunkNum++ { curChunkSize := o.ChunkSize - if chunkNum == numChunks-1 { // Last chunk + if chunkNum == o.NumChunks-1 { // Last chunk curChunkSize = o.TransferSize - (int64(chunkNum) * o.ChunkSize) // Remove size of all transferred chunks from total } offset := int64(chunkNum) * o.ChunkSize @@ -65,7 +69,7 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error { // Wait for the operations to complete. var firstErr error = nil - for chunkNum := uint16(0); chunkNum < numChunks; chunkNum++ { + for chunkNum := uint16(0); chunkNum < o.NumChunks; chunkNum++ { responseError := <-operationResponseChannel // record the first error (the original error which should cause the other chunks to fail with canceled context) if responseError != nil && firstErr == nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go new file mode 100644 index 000000000..e3aa4a488 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/buffer_manager.go @@ -0,0 +1,70 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. See License.txt in the project root for license information. + +package shared + +type BufferManager[T ~[]byte] interface { + // Acquire returns the channel that contains the pool of buffers. + Acquire() <-chan T + + // Release releases the buffer back to the pool for reuse/cleanup. + Release(T) + + // Grow grows the number of buffers, up to the predefined max. + // It returns the total number of buffers or an error. + // No error is returned if the number of buffers has reached max. 
+ // This is called only from the reading goroutine. + Grow() (int, error) + + // Free cleans up all buffers. + Free() +} + +// mmbPool implements the bufferManager interface. +// it uses anonymous memory mapped files for buffers. +// don't use this type directly, use newMMBPool() instead. +type mmbPool struct { + buffers chan Mmb + count int + max int + size int64 +} + +func NewMMBPool(maxBuffers int, bufferSize int64) BufferManager[Mmb] { + return &mmbPool{ + buffers: make(chan Mmb, maxBuffers), + max: maxBuffers, + size: bufferSize, + } +} + +func (pool *mmbPool) Acquire() <-chan Mmb { + return pool.buffers +} + +func (pool *mmbPool) Grow() (int, error) { + if pool.count < pool.max { + buffer, err := NewMMB(pool.size) + if err != nil { + return 0, err + } + pool.buffers <- buffer + pool.count++ + } + return pool.count, nil +} + +func (pool *mmbPool) Release(buffer Mmb) { + pool.buffers <- buffer +} + +func (pool *mmbPool) Free() { + for i := 0; i < pool.count; i++ { + buffer := <-pool.buffers + buffer.Delete() + } + pool.count = 0 +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_unix.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go similarity index 89% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_unix.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go index 5f7f4d828..cdcadf311 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_unix.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go @@ -5,7 +5,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. -package blockblob +package shared import ( "fmt" @@ -14,20 +14,20 @@ import ( ) // mmb is a memory mapped buffer -type mmb []byte +type Mmb []byte // newMMB creates a new memory mapped buffer with the specified size -func newMMB(size int64) (mmb, error) { +func NewMMB(size int64) (Mmb, error) { prot, flags := syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE addr, err := syscall.Mmap(-1, 0, int(size), prot, flags) if err != nil { return nil, os.NewSyscallError("Mmap", err) } - return mmb(addr), nil + return Mmb(addr), nil } // delete cleans up the memory mapped buffer -func (m *mmb) delete() { +func (m *Mmb) Delete() { err := syscall.Munmap(*m) *m = nil if err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_windows.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_windows.go similarity index 82% rename from vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_windows.go rename to vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_windows.go index 3f966d65b..ef9fdc2a1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/mmf_windows.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_windows.go @@ -4,7 +4,7 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for license information. 
-package blockblob +package shared import ( "fmt" @@ -14,11 +14,11 @@ import ( "unsafe" ) -// mmb is a memory mapped buffer -type mmb []byte +// Mmb is a memory mapped buffer +type Mmb []byte -// newMMB creates a new memory mapped buffer with the specified size -func newMMB(size int64) (mmb, error) { +// NewMMB creates a new memory mapped buffer with the specified size +func NewMMB(size int64) (Mmb, error) { const InvalidHandleValue = ^uintptr(0) // -1 prot, access := uint32(syscall.PAGE_READWRITE), uint32(syscall.FILE_MAP_WRITE) @@ -35,7 +35,7 @@ func newMMB(size int64) (mmb, error) { return nil, os.NewSyscallError("MapViewOfFile", err) } - m := mmb{} + m := Mmb{} h := (*reflect.SliceHeader)(unsafe.Pointer(&m)) h.Data = addr h.Len = int(size) @@ -43,10 +43,10 @@ func newMMB(size int64) (mmb, error) { return m, nil } -// delete cleans up the memory mapped buffer -func (m *mmb) delete() { +// Delete cleans up the memory mapped buffer +func (m *Mmb) Delete() { addr := uintptr(unsafe.Pointer(&(([]byte)(*m)[0]))) - *m = mmb{} + *m = Mmb{} err := syscall.UnmapViewOfFile(addr) if err != nil { // if we get here, there is likely memory corruption. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go index 02936ad03..1de60999e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/shared.go @@ -44,6 +44,15 @@ const ( const crc64Polynomial uint64 = 0x9A6C9329AC4BC9B5 +const ( + AppendBlobClient = "azblob/appendblob.Client" + BlobClient = "azblob/blob.Client" + BlockBlobClient = "azblob/blockblob.Client" + ContainerClient = "azblob/container.Client" + PageBlobClient = "azblob/pageblob.Client" + ServiceClient = "azblob/service.Client" +) + var CRC64Table = crc64.MakeTable(crc64Polynomial) // CopyOptions returns a zero-value T if opts is nil. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go index 69328dbeb..7e534cee1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go @@ -37,10 +37,13 @@ type Client base.CompositeClient[generated.BlobClient, generated.PageBlobClient] func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { authPolicy := shared.NewStorageChallengePolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewPageBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(shared.PageBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewPageBlobClient(blobURL, azClient, nil)), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. 
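The batch_transfer.go hunk above removes the internal chunk-count calculation from DoBatchTransfer; callers inside the module now compute it themselves and pass it as BatchTransferOptions.NumChunks. A stdlib sketch of the same ceiling division the removed line performed; the zero-size guard is added here only for illustration:

package example

// numChunksFor mirrors the computation DoBatchTransfer used to do internally:
// uint16(((TransferSize - 1) / ChunkSize) + 1).
func numChunksFor(transferSize, chunkSize int64) uint16 {
	if transferSize == 0 || chunkSize == 0 {
		return 0
	}
	return uint16(((transferSize - 1) / chunkSize) + 1)
}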
@@ -49,9 +52,12 @@ func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptio // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - return (*Client)(base.NewPageBlobClient(blobURL, pl, nil)), nil + azClient, err := azcore.NewClient(shared.PageBlobClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewPageBlobClient(blobURL, azClient, nil)), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. @@ -61,10 +67,13 @@ func NewClientWithNoCredential(blobURL string, options *ClientOptions) (*Client, func NewClientWithSharedKeyCredential(blobURL string, cred *blob.SharedKeyCredential, options *ClientOptions) (*Client, error) { authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewPageBlobClient(blobURL, pl, cred)), nil + azClient, err := azcore.NewClient(shared.PageBlobClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewPageBlobClient(blobURL, azClient, cred)), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. @@ -119,7 +128,7 @@ func (pb *Client) WithSnapshot(snapshot string) (*Client, error) { } p.Snapshot = snapshot - return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().Pipeline(), pb.sharedKey())), nil + return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().InternalClient(), pb.sharedKey())), nil } // WithVersionID creates a new PageBlobURL object identical to the source but with the specified snapshot timestamp. @@ -131,7 +140,7 @@ func (pb *Client) WithVersionID(versionID string) (*Client, error) { } p.VersionID = versionID - return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().Pipeline(), pb.sharedKey())), nil + return (*Client)(base.NewPageBlobClient(p.String(), pb.generated().InternalClient(), pb.sharedKey())), nil } // Create creates a page blob of the specified length. Call PutPage to upload data to a page blob. 
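The BufferManager and mmbPool added in internal/shared/buffer_manager.go earlier in this patch hand out anonymous memory-mapped buffers through a channel. A rough sketch of the intended call sequence (Grow, Acquire, Release, Free); note the import is an internal package, so code like this only compiles from within the azblob module, and the payload below is illustrative:

package example

import (
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
)

func fillAndDrain() error {
	pool := shared.NewMMBPool(4, 1<<20) // up to four 1 MiB mmap-backed buffers
	defer pool.Free()                   // unmap everything when done

	if _, err := pool.Grow(); err != nil { // allocate one more buffer, up to the max
		return err
	}
	buf := <-pool.Acquire()        // take a buffer from the pool
	copy(buf, []byte("payload"))   // use it as an ordinary []byte
	pool.Release(buf)              // hand it back for reuse
	return nil
}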
@@ -228,7 +237,7 @@ func (pb *Client) NewGetPageRangesPager(o *GetPageRangesOptions) *runtime.Pager[ if err != nil { return GetPageRangesResponse{}, err } - resp, err := pb.generated().Pipeline().Do(req) + resp, err := pb.generated().InternalClient().Pipeline().Do(req) if err != nil { return GetPageRangesResponse{}, err } @@ -261,7 +270,7 @@ func (pb *Client) NewGetPageRangesDiffPager(o *GetPageRangesDiffOptions) *runtim if err != nil { return GetPageRangesDiffResponse{}, err } - resp, err := pb.generated().Pipeline().Do(req) + resp, err := pb.generated().InternalClient().Pipeline().Do(req) if err != nil { return GetPageRangesDiffResponse{}, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go index 454a08cb9..4069bb132 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/account.go @@ -25,13 +25,14 @@ type UserDelegationCredential = exported.UserDelegationCredential // AccountSignatureValues is used to generate a Shared Access Signature (SAS) for an Azure Storage account. // For more information, see https://docs.microsoft.com/rest/api/storageservices/constructing-an-account-sas type AccountSignatureValues struct { - Version string `param:"sv"` // If not specified, this format to SASVersion - Protocol Protocol `param:"spr"` // See the SASProtocol* constants - StartTime time.Time `param:"st"` // Not specified if IsZero - ExpiryTime time.Time `param:"se"` // Not specified if IsZero - Permissions string `param:"sp"` // Create by initializing AccountPermissions and then call String() - IPRange IPRange `param:"sip"` - ResourceTypes string `param:"srt"` // Create by initializing AccountResourceTypes and then call String() + Version string `param:"sv"` // If not specified, this format to SASVersion + Protocol Protocol `param:"spr"` // See the SASProtocol* constants + StartTime time.Time `param:"st"` // Not specified if IsZero + ExpiryTime time.Time `param:"se"` // Not specified if IsZero + Permissions string `param:"sp"` // Create by initializing AccountPermissions and then call String() + IPRange IPRange `param:"sip"` + ResourceTypes string `param:"srt"` // Create by initializing AccountResourceTypes and then call String() + EncryptionScope string `param:"ses"` } // SignWithSharedKey uses an account's shared key credential to sign this signature values to produce @@ -68,6 +69,7 @@ func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKey v.IPRange.String(), string(v.Protocol), v.Version, + v.EncryptionScope, ""}, // That is right, the account SAS requires a terminating extra newline "\n") @@ -77,12 +79,13 @@ func (v AccountSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKey } p := QueryParameters{ // Common SAS parameters - version: v.Version, - protocol: v.Protocol, - startTime: v.StartTime, - expiryTime: v.ExpiryTime, - permissions: v.Permissions, - ipRange: v.IPRange, + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + encryptionScope: v.EncryptionScope, // Account-specific SAS parameters services: "b", // will always be "b" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go index 4d97372da..4c23208e2 100644 --- 
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go @@ -23,7 +23,7 @@ const ( var ( // Version is the default version encoded in the SAS token. - Version = "2020-02-10" + Version = "2021-12-02" ) // TimeFormats ISO 8601 format. @@ -143,6 +143,7 @@ type QueryParameters struct { authorizedObjectID string `param:"saoid"` unauthorizedObjectID string `param:"suoid"` correlationID string `param:"scid"` + encryptionScope string `param:"ses"` // private member used for startTime and expiryTime formatting. stTimeFormat string seTimeFormat string @@ -163,6 +164,11 @@ func (p *QueryParameters) SignedCorrelationID() string { return p.correlationID } +// EncryptionScope returns encryptionScope +func (p *QueryParameters) EncryptionScope() string { + return p.encryptionScope +} + // SignedOID returns signedOID. func (p *QueryParameters) SignedOID() string { return p.signedOID @@ -355,6 +361,9 @@ func (p *QueryParameters) Encode() string { if p.correlationID != "" { v.Add("scid", p.correlationID) } + if p.encryptionScope != "" { + v.Add("ses", p.encryptionScope) + } return v.Encode() } @@ -429,6 +438,8 @@ func NewQueryParameters(values url.Values, deleteSASParametersFromValues bool) Q p.unauthorizedObjectID = val case "scid": p.correlationID = val + case "ses": + p.encryptionScope = val default: isSASKey = false // We didn't recognize the query parameter } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go index b900e2b06..45f730847 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go @@ -40,6 +40,7 @@ type BlobSignatureValues struct { AuthorizedObjectID string // saoid UnauthorizedObjectID string // suoid CorrelationID string // scid + EncryptionScope string `param:"ses"` } func getDirectoryDepth(path string) string { @@ -51,17 +52,11 @@ func getDirectoryDepth(path string) string { // SignWithSharedKey uses an account's SharedKeyCredential to sign this signature values to produce the proper SAS query parameters. 
func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCredential) (QueryParameters, error) { - if v.ExpiryTime.IsZero() || v.Permissions == "" { + if v.Identifier == "" && (v.ExpiryTime.IsZero() || v.Permissions == "") { return QueryParameters{}, errors.New("service SAS is missing at least one of these: ExpiryTime or Permissions") } - //Make sure the permission characters are in the correct order - perms, err := parseBlobPermissions(v.Permissions) - if err != nil { - return QueryParameters{}, err - } - v.Permissions = perms.String() - + // Parse the resource resource := "c" if !v.SnapshotTime.IsZero() { resource = "bs" @@ -76,6 +71,21 @@ func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCre resource = "b" } + // make sure the permission characters are in the correct order + if resource == "c" { + perms, err := parseContainerPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } else { + perms, err := parseBlobPermissions(v.Permissions) + if err != nil { + return QueryParameters{}, err + } + v.Permissions = perms.String() + } + if v.Version == "" { v.Version = Version } @@ -94,7 +104,8 @@ func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCre string(v.Protocol), v.Version, resource, - snapshotTime, // signed timestamp + snapshotTime, // signed timestamp + v.EncryptionScope, v.CacheControl, // rscc v.ContentDisposition, // rscd v.ContentEncoding, // rsce @@ -109,12 +120,13 @@ func (v BlobSignatureValues) SignWithSharedKey(sharedKeyCredential *SharedKeyCre p := QueryParameters{ // Common SAS parameters - version: v.Version, - protocol: v.Protocol, - startTime: v.StartTime, - expiryTime: v.ExpiryTime, - permissions: v.Permissions, - ipRange: v.IPRange, + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + encryptionScope: v.EncryptionScope, // Container/Blob-specific SAS parameters resource: resource, @@ -202,7 +214,8 @@ func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *Us string(v.Protocol), v.Version, resource, - snapshotTime, // signed timestamp + snapshotTime, // signed timestamp + v.EncryptionScope, v.CacheControl, // rscc v.ContentDisposition, // rscd v.ContentEncoding, // rsce @@ -217,12 +230,13 @@ func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *Us p := QueryParameters{ // Common SAS parameters - version: v.Version, - protocol: v.Protocol, - startTime: v.StartTime, - expiryTime: v.ExpiryTime, - permissions: v.Permissions, - ipRange: v.IPRange, + version: v.Version, + protocol: v.Protocol, + startTime: v.StartTime, + expiryTime: v.ExpiryTime, + permissions: v.Permissions, + ipRange: v.IPRange, + encryptionScope: v.EncryptionScope, // Container/Blob-specific SAS parameters resource: resource, diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go index fb7b8696a..461775347 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go @@ -42,10 +42,13 @@ type Client base.Client[generated.ServiceClient] func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) { authPolicy := shared.NewStorageChallengePolicy(cred) conOptions := 
shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewServiceClient(serviceURL, pl, &cred)), nil + azClient, err := azcore.NewClient(shared.ServiceClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewServiceClient(serviceURL, azClient, &cred)), nil } // NewClientWithNoCredential creates an instance of Client with the specified values. @@ -54,9 +57,12 @@ func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOp // - options - client options; pass nil to accept the default values func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Client, error) { conOptions := shared.GetClientOptions(options) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) - return (*Client)(base.NewServiceClient(serviceURL, pl, nil)), nil + azClient, err := azcore.NewClient(shared.ServiceClient, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + return (*Client)(base.NewServiceClient(serviceURL, azClient, nil)), nil } // NewClientWithSharedKeyCredential creates an instance of Client with the specified values. @@ -66,10 +72,14 @@ func NewClientWithNoCredential(serviceURL string, options *ClientOptions) (*Clie func NewClientWithSharedKeyCredential(serviceURL string, cred *SharedKeyCredential, options *ClientOptions) (*Client, error) { authPolicy := exported.NewSharedKeyCredPolicy(cred) conOptions := shared.GetClientOptions(options) - conOptions.PerRetryPolicies = append(conOptions.PerRetryPolicies, authPolicy) - pl := runtime.NewPipeline(exported.ModuleName, exported.ModuleVersion, runtime.PipelineOptions{}, &conOptions.ClientOptions) + plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}} - return (*Client)(base.NewServiceClient(serviceURL, pl, cred)), nil + azClient, err := azcore.NewClient(shared.ServiceClient, exported.ModuleVersion, plOpts, &conOptions.ClientOptions) + if err != nil { + return nil, err + } + + return (*Client)(base.NewServiceClient(serviceURL, azClient, cred)), nil } // NewClientFromConnectionString creates an instance of Client with the specified values. @@ -135,7 +145,7 @@ func (s *Client) URL() string { // this Client's URL. The new container.Client uses the same request policy pipeline as the Client. func (s *Client) NewContainerClient(containerName string) *container.Client { containerURL := runtime.JoinPaths(s.generated().Endpoint(), containerName) - return (*container.Client)(base.NewContainerClient(containerURL, s.generated().Pipeline(), s.credential())) + return (*container.Client)(base.NewContainerClient(containerURL, s.generated().InternalClient().WithClientName(shared.ContainerClient), s.credential())) } // CreateContainer is a lifecycle method to creates a new container under the specified account. 
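For orientation on the azblob changes above (the new SAS `EncryptionScope`/`ses` field and the service client constructors that now build their pipeline via `azcore.NewClient`), here is a minimal sketch of how the updated public surface might be used. It assumes the azblob version vendored by this patch; the account, key, container, blob, and scope names are placeholders, not values taken from this diff.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
)

func main() {
	// Placeholder account details; real values would come from configuration.
	accountName, accountKey := "myaccount", "<base64-account-key>"

	cred, err := azblob.NewSharedKeyCredential(accountName, accountKey)
	if err != nil {
		log.Fatal(err)
	}

	// The constructor signature is unchanged by this update; only the internal
	// pipeline construction moved to azcore.NewClient.
	svc, err := service.NewClientWithSharedKeyCredential(
		fmt.Sprintf("https://%s.blob.core.windows.net/", accountName), cred, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("service URL:", svc.URL())

	// Sign a blob-scoped SAS. EncryptionScope is the newly added field and is
	// emitted as the "ses" query parameter by Encode().
	qp, err := sas.BlobSignatureValues{
		Protocol:        sas.ProtocolHTTPS,
		ExpiryTime:      time.Now().UTC().Add(time.Hour),
		Permissions:     (&sas.BlobPermissions{Read: true}).String(),
		ContainerName:   "mycontainer",
		BlobName:        "myblob.txt",
		EncryptionScope: "myscope", // hypothetical encryption scope name
	}.SignWithSharedKey(cred)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("sas query:", qp.Encode())
}
```

NewQueryParameters parses the same `ses` value back out (exposed via `EncryptionScope()`), so a token generated this way round-trips through the parsing path added in query_params.go above.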
@@ -184,6 +194,9 @@ func (s *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager if o.Include.Metadata { listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeMetadata) } + if o.Include.System { + listOptions.Include = append(listOptions.Include, generated.ListContainersIncludeTypeSystem) + } listOptions.Marker = o.Marker listOptions.Maxresults = o.MaxResults listOptions.Prefix = o.Prefix @@ -204,7 +217,7 @@ func (s *Client) NewListContainersPager(o *ListContainersOptions) *runtime.Pager if err != nil { return ListContainersResponse{}, err } - resp, err := s.generated().Pipeline().Do(req) + resp, err := s.generated().InternalClient().Pipeline().Do(req) if err != nil { return ListContainersResponse{}, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go index 22f49474d..b70724d79 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/models.go @@ -156,6 +156,9 @@ type ListContainersInclude struct { // Tells the service whether to return soft-deleted containers. Deleted bool + + // Tells the service whether to return system containers. + System bool } // --------------------------------------------------------------------------------------------------------------------- diff --git a/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md index 47da5cbbf..48105a44b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md @@ -1,3 +1,1132 @@ +# Release (2023-11-01) + +## General Highlights +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.74.0](service/connect/CHANGELOG.md#v1740-2023-11-01) + * **Feature**: Adds the BatchGetFlowAssociation API which returns flow associations (flow-resource) corresponding to the list of resourceArns supplied in the request. This release also adds IsDefault, LastModifiedRegion and LastModifiedTime fields to the responses of several Describe and List APIs. +* `github.com/aws/aws-sdk-go-v2/service/globalaccelerator`: [v1.19.0](service/globalaccelerator/CHANGELOG.md#v1190-2023-11-01) + * **Feature**: Global Accelerator now supports accelerators with cross account endpoints. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.61.0](service/rds/CHANGELOG.md#v1610-2023-11-01) + * **Feature**: This release adds support for customized networking resources to Amazon RDS Custom. +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.35.0](service/redshift/CHANGELOG.md#v1350-2023-11-01) + * **Feature**: Added support for Multi-AZ deployments for Provisioned RA3 clusters that provide 99.99% SLA availability. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.115.0](service/sagemaker/CHANGELOG.md#v11150-2023-11-01) + * **Feature**: Support for batch transform input in Model dashboard + +# Release (2023-10-31) + +## General Highlights +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.16.0](service/amplify/CHANGELOG.md#v1160-2023-10-31) + * **Feature**: Add backend field to CreateBranch and UpdateBranch requests. Add pagination support for ListApps, ListDomainAssociations, ListBranches, and ListJobs +* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.20.0](service/applicationinsights/CHANGELOG.md#v1200-2023-10-31) + * **Feature**: Automate attaching managed policies +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.129.0](service/ec2/CHANGELOG.md#v11290-2023-10-31) + * **Feature**: Capacity Blocks for ML are a new EC2 purchasing option for reserving GPU instances on a future date to support short duration machine learning (ML) workloads. Capacity Blocks automatically place instances close together inside Amazon EC2 UltraClusters for low-latency, high-throughput networking. +* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.8.0](service/m2/CHANGELOG.md#v180-2023-10-31) + * **Feature**: Added name filter ability for ListDataSets API, added ForceUpdate for Updating environment and BatchJob submission using S3BatchJobIdentifier +* `github.com/aws/aws-sdk-go-v2/service/neptunedata`: [v1.1.0](service/neptunedata/CHANGELOG.md#v110-2023-10-31) + * **Feature**: Minor change to not retry CancelledByUserException +* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.20.0](service/translate/CHANGELOG.md#v1200-2023-10-31) + * **Feature**: Added support for Brevity translation settings feature. + +# Release (2023-10-30) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.72.0](service/connect/CHANGELOG.md#v1720-2023-10-30) + * **Feature**: This release adds InstanceId field for phone number APIs. +* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.22.0](service/dataexchange/CHANGELOG.md#v1220-2023-10-30) + * **Feature**: We added a new API action: SendDataSetNotification. +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.30.0](service/datasync/CHANGELOG.md#v1300-2023-10-30) + * **Feature**: Platform version changes to support AL1 deprecation initiative. +* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.14.0](service/finspace/CHANGELOG.md#v1140-2023-10-30) + * **Feature**: Introducing new API UpdateKxClusterCodeConfiguration, introducing new cache types for clusters and introducing new deployment modes for updating clusters. +* `github.com/aws/aws-sdk-go-v2/service/mediapackagev2`: [v1.4.0](service/mediapackagev2/CHANGELOG.md#v140-2023-10-30) + * **Feature**: This feature allows customers to create a combination of manifest filtering, startover and time delay configuration that applies to all egress requests by default. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.59.0](service/rds/CHANGELOG.md#v1590-2023-10-30) + * **Feature**: This release launches the CreateIntegration, DeleteIntegration, and DescribeIntegrations APIs to manage zero-ETL Integrations. +* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.9.0](service/redshiftserverless/CHANGELOG.md#v190-2023-10-30) + * **Feature**: Added support for custom domain names for Amazon Redshift Serverless workgroups. This feature enables customers to create a custom domain name and use ACM to generate fully secure connections to it. 
+* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.14.0](service/resiliencehub/CHANGELOG.md#v1140-2023-10-30) + * **Feature**: Introduced the ability to filter applications by their last assessment date and time and have included metrics for the application's estimated workload Recovery Time Objective (RTO) and estimated workload Recovery Point Objective (RPO). +* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.19.0](service/s3outposts/CHANGELOG.md#v1190-2023-10-30) + * **Feature**: Updated ListOutpostsWithS3 API response to include S3OutpostArn for use with AWS RAM. +* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.16.1](service/wisdom/CHANGELOG.md#v1161-2023-10-30) + * **Documentation**: This release added necessary API documents on creating a Wisdom knowledge base to integrate with S3. + +# Release (2023-10-27) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.30.0](service/emr/CHANGELOG.md#v1300-2023-10-27) + * **Feature**: Updated CreateCluster API request and DescribeCluster API responses to include EbsRootVolumeIops, and EbsRootVolumeThroughput attributes that specify the user configured root volume IOPS and throughput for Amazon EBS root device volume. This feature will be available from Amazon EMR releases 6.15.0 +* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.24.0](service/neptune/CHANGELOG.md#v1240-2023-10-27) + * **Feature**: Update TdeCredentialPassword type to SensitiveString +* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.23.1](service/pinpoint/CHANGELOG.md#v1231-2023-10-27) + * **Documentation**: Updated documentation to describe the case insensitivity for EndpointIds. +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.33.0](service/redshift/CHANGELOG.md#v1330-2023-10-27) + * **Feature**: added support to create a dual stack cluster +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.40.1](service/wafv2/CHANGELOG.md#v1401-2023-10-27) + * **Documentation**: Updates the descriptions for the calls that manage web ACL associations, to provide information for customer-managed IAM policies. + +# Release (2023-10-26) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.26.0](service/appstream/CHANGELOG.md#v1260-2023-10-26) + * **Feature**: This release introduces multi-session fleets, allowing customers to provision more than one user session on a single fleet instance. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.128.0](service/ec2/CHANGELOG.md#v11280-2023-10-26) + * **Feature**: Launching GetSecurityGroupsForVpc API. This API gets security groups that can be associated by the AWS account making the request with network interfaces in the specified VPC. +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.32.0](service/networkfirewall/CHANGELOG.md#v1320-2023-10-26) + * **Feature**: Network Firewall now supports inspection of outbound SSL/TLS traffic. +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.23.0](service/opensearch/CHANGELOG.md#v1230-2023-10-26) + * **Feature**: You can specify ipv4 or dualstack IPAddressType for cluster endpoints. If you specify IPAddressType as dualstack, the new endpoint will be visible under the 'EndpointV2' parameter and will support IPv4 and IPv6 requests. Whereas, the 'Endpoint' will continue to serve IPv4 requests. 
+* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.32.0](service/redshift/CHANGELOG.md#v1320-2023-10-26) + * **Feature**: Add Redshift APIs GetResourcePolicy, DeleteResourcePolicy, PutResourcePolicy and DescribeInboundIntegrations for the new Amazon Redshift Zero-ETL integration feature, which can be used to control data ingress into Redshift namespace, and view inbound integrations. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.113.0](service/sagemaker/CHANGELOG.md#v11130-2023-10-26) + * **Feature**: Amazon Sagemaker Autopilot now supports Text Generation jobs. +* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.23.0](service/sns/CHANGELOG.md#v1230-2023-10-26) + * **Feature**: Message Archiving and Replay is now supported in Amazon SNS for FIFO topics. +* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.6.0](service/ssmsap/CHANGELOG.md#v160-2023-10-26) + * **Feature**: AWS Systems Manager for SAP added support for registration and discovery of SAP ABAP applications +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.35.0](service/transfer/CHANGELOG.md#v1350-2023-10-26) + * **Feature**: No API changes from previous release. This release migrated the model to Smithy keeping all features unchanged. + +# Release (2023-10-25) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.8.0](service/connectcases/CHANGELOG.md#v180-2023-10-25) + * **Feature**: Increase maximum length of CommentBody to 3000, and increase maximum length of StringValue to 1500 +* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.20.0](service/groundstation/CHANGELOG.md#v1200-2023-10-25) + * **Feature**: This release will allow KMS alias names to be used when creating Mission Profiles +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.24.0](service/iam/CHANGELOG.md#v1240-2023-10-25) + * **Feature**: Updates to GetAccessKeyLastUsed action to replace NoSuchEntity error with AccessDeniedException error. + +# Release (2023-10-24) + +## General Highlights +* **Feature**: **BREAKFIX**: Correct nullability and default value representation of various input fields across a large number of services. Calling code that references one or more of the affected fields will need to update usage accordingly. See [2162](https://github.com/aws/aws-sdk-go-v2/issues/2162). +* **Feature**: **BREAKFIX**: Correct nullability representation of APIGateway-based services. +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/codepipeline`: [v1.18.0](service/codepipeline/CHANGELOG.md#v1180-2023-10-24) + * **Feature**: Add ability to trigger pipelines from git tags, define variables at pipeline level and new pipeline type V2. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.127.0](service/ec2/CHANGELOG.md#v11270-2023-10-24) + * **Feature**: This release updates the documentation for InstanceInterruptionBehavior and HibernationOptionsRequest to more accurately describe the behavior of these two parameters when using Spot hibernation. +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.30.0](service/eks/CHANGELOG.md#v1300-2023-10-24) + * **Feature**: Added support for Cluster Subnet and Security Group mutability. +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.23.0](service/iam/CHANGELOG.md#v1230-2023-10-24) + * **Feature**: Add the partitional endpoint for IAM in iso-f. 
+* `github.com/aws/aws-sdk-go-v2/service/migrationhubconfig`: [v1.16.0](service/migrationhubconfig/CHANGELOG.md#v1160-2023-10-24) + * **Feature**: This release introduces DeleteHomeRegionControl API that customers can use to delete the Migration Hub Home Region configuration +* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.12.0](service/migrationhubstrategy/CHANGELOG.md#v1120-2023-10-24) + * **Feature**: This release introduces multi-data-source feature in Migration Hub Strategy Recommendations. This feature now supports vCenter as a data source to fetch inventory in addition to ADS and Import from file workflow that is currently supported with MHSR collector. +* `github.com/aws/aws-sdk-go-v2/service/opensearchserverless`: [v1.6.0](service/opensearchserverless/CHANGELOG.md#v160-2023-10-24) + * **Feature**: This release includes the following new APIs: CreateLifecyclePolicy, UpdateLifecyclePolicy, BatchGetLifecyclePolicy, DeleteLifecyclePolicy, ListLifecyclePolicies and BatchGetEffectiveLifecyclePolicy to support the data lifecycle management feature. + +# Release (2023-10-23) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/marketplacecommerceanalytics`: [v1.15.0](service/marketplacecommerceanalytics/CHANGELOG.md#v1150-2023-10-23) + * **Feature**: The StartSupportDataExport operation has been deprecated as part of the Product Support Connection deprecation. As of December 2022, Product Support Connection is no longer supported. +* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.20.0](service/networkmanager/CHANGELOG.md#v1200-2023-10-23) + * **Feature**: This release adds API support for Tunnel-less Connect (NoEncap Protocol) for AWS Cloud WAN +* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.8.0](service/redshiftserverless/CHANGELOG.md#v180-2023-10-23) + * **Feature**: This release adds support for customers to see the patch version and workgroup version in Amazon Redshift Serverless. +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.32.0](service/rekognition/CHANGELOG.md#v1320-2023-10-23) + * **Feature**: Amazon Rekognition introduces StartMediaAnalysisJob, GetMediaAnalysisJob, and ListMediaAnalysisJobs operations to run a bulk analysis of images with a Detect Moderation model. + +# Release (2023-10-20) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.22.0](service/appconfig/CHANGELOG.md#v1220-2023-10-20) + * **Feature**: Update KmsKeyIdentifier constraints to support AWS KMS multi-Region keys. +* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.19.0](service/appintegrations/CHANGELOG.md#v1190-2023-10-20) + * **Feature**: Updated ScheduleConfig to be an optional input to CreateDataIntegration to support event driven downloading of files from sources such as Amazon s3 using Amazon Connect AppIntegrations. +* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.19.0](service/applicationdiscoveryservice/CHANGELOG.md#v1190-2023-10-20) + * **Feature**: This release introduces three new APIs: StartBatchDeleteConfigurationTask, DescribeBatchDeleteConfigurationTask, and BatchDeleteAgents. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.70.0](service/connect/CHANGELOG.md#v1700-2023-10-20) + * **Feature**: This release adds support for updating phone number metadata, such as phone number description. 
+* `github.com/aws/aws-sdk-go-v2/service/medicalimaging`: [v1.2.3](service/medicalimaging/CHANGELOG.md#v123-2023-10-20) + * **Documentation**: Updates on documentation links +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.39.0](service/ssm/CHANGELOG.md#v1390-2023-10-20) + * **Feature**: This release introduces a new API: DeleteOpsItem. This allows deletion of an OpsItem. + +# Release (2023-10-19) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.126.0](service/ec2/CHANGELOG.md#v11260-2023-10-19) + * **Feature**: Amazon EC2 C7a instances, powered by 4th generation AMD EPYC processors, are ideal for high performance, compute-intensive workloads such as high performance computing. Amazon EC2 R7i instances are next-generation memory optimized and powered by custom 4th Generation Intel Xeon Scalable processors. +* `github.com/aws/aws-sdk-go-v2/service/managedblockchainquery`: [v1.3.0](service/managedblockchainquery/CHANGELOG.md#v130-2023-10-19) + * **Feature**: This release adds support for Ethereum Sepolia network +* `github.com/aws/aws-sdk-go-v2/service/neptunedata`: [v1.0.4](service/neptunedata/CHANGELOG.md#v104-2023-10-19) + * **Documentation**: Doc changes to add IAM action mappings for the data actions. +* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.11.0](service/omics/CHANGELOG.md#v1110-2023-10-19) + * **Feature**: This change enables customers to retrieve failure reasons with detailed status messages for their failed runs +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.21.0](service/opensearch/CHANGELOG.md#v1210-2023-10-19) + * **Feature**: Added Cluster Administrative options for node restart, opensearch process restart and opensearch dashboard restart for Multi-AZ without standby domains +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.46.0](service/quicksight/CHANGELOG.md#v1460-2023-10-19) + * **Feature**: This release adds the following: 1) Trino and Starburst Database Connectors 2) Custom total for tables and pivot tables 3) Enable restricted folders 4) Add rolling dates for time equality filters 5) Refine DataPathValue and introduce DataPathType 6) Add SeriesType to ReferenceLineDataConfiguration +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.21.6](service/secretsmanager/CHANGELOG.md#v1216-2023-10-19) + * **Documentation**: Documentation updates for Secrets Manager +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.22.0](service/servicecatalog/CHANGELOG.md#v1220-2023-10-19) + * **Feature**: Introduce support for EXTERNAL product and provisioning artifact type in CreateProduct and CreateProvisioningArtifact APIs. +* `github.com/aws/aws-sdk-go-v2/service/verifiedpermissions`: [v1.3.0](service/verifiedpermissions/CHANGELOG.md#v130-2023-10-19) + * **Feature**: Improving Amazon Verified Permissions Create experience +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.31.3](service/workspaces/CHANGELOG.md#v1313-2023-10-19) + * **Documentation**: Documentation updates for WorkSpaces + +# Release (2023-10-18) + +## General Highlights +* **Feature**: Add handwritten paginators that were present in some services in the v1 SDK. +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.18.11](service/cloud9/CHANGELOG.md#v11811-2023-10-18) + * **Documentation**: Update to imageId parameter behavior and dates updated. 
+* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.23.0](service/dynamodb/CHANGELOG.md#v1230-2023-10-18) + * **Documentation**: Updating descriptions for several APIs. +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.44.0](service/kendra/CHANGELOG.md#v1440-2023-10-18) + * **Feature**: Changes for a new feature in Amazon Kendra's Query API to Collapse/Expand query results +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.57.0](service/rds/CHANGELOG.md#v1570-2023-10-18) + * **Feature**: This release adds support for upgrading the storage file system configuration on the DB instance using a blue/green deployment or a read replica. +* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.16.0](service/wisdom/CHANGELOG.md#v1160-2023-10-18) + * **Feature**: This release adds a max limit of 25 recommendation ids for NotifyRecommendationsReceived API. + +# Release (2023-10-17) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.18.0](service/applicationdiscoveryservice/CHANGELOG.md#v1180-2023-10-17) + * **Feature**: This release introduces three new APIs: StartBatchDeleteConfigurationTask, DescribeBatchDeleteConfigurationTask, and BatchDeleteAgents. +* `github.com/aws/aws-sdk-go-v2/service/codepipeline`: [v1.17.0](service/codepipeline/CHANGELOG.md#v1170-2023-10-17) + * **Feature**: Add retryMode ALL_ACTIONS to RetryStageExecution API that retries a failed stage starting from first action in the stage +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.30.4](service/ecs/CHANGELOG.md#v1304-2023-10-17) + * **Documentation**: Documentation only updates to address Amazon ECS tickets. +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.29.0](service/guardduty/CHANGELOG.md#v1290-2023-10-17) + * **Feature**: Add domainWithSuffix finding field to dnsRequestAction +* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.23.0](service/kafka/CHANGELOG.md#v1230-2023-10-17) + * **Feature**: AWS Managed Streaming for Kafka is launching MSK Replicator, a new feature that enables customers to reliably replicate data across Amazon MSK clusters in same or different AWS regions. You can now use SDK to create, list, describe, delete, update, and manage tags of MSK Replicators. +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.14.0](service/route53recoverycluster/CHANGELOG.md#v1140-2023-10-17) + * **Feature**: Adds Owner field to ListRoutingControls API. +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.14.0](service/route53recoverycontrolconfig/CHANGELOG.md#v1140-2023-10-17) + * **Feature**: Adds permissions for GetResourcePolicy to support returning details about AWS Resource Access Manager resource policies for shared resources. + +# Release (2023-10-16) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/config`: [v1.19.0](config/CHANGELOG.md#v1190-2023-10-16) + * **Feature**: Modify logic of retrieving user agent appID from env config +* `github.com/aws/aws-sdk-go-v2/feature/cloudfront/sign`: [v1.4.0](feature/cloudfront/sign/CHANGELOG.md#v140-2023-10-16) + * **Feature**: Add support for loading PKCS8-formatted private keys.
+* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.35.0](service/cloudformation/CHANGELOG.md#v1350-2023-10-16) + * **Feature**: SDK and documentation updates for UpdateReplacePolicy +* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.17.0](service/drs/CHANGELOG.md#v1170-2023-10-16) + * **Feature**: Updated existing API to allow AWS Elastic Disaster Recovery support of launching recovery into existing EC2 instances. +* `github.com/aws/aws-sdk-go-v2/service/entityresolution`: [v1.3.0](service/entityresolution/CHANGELOG.md#v130-2023-10-16) + * **Feature**: This launch expands our matching techniques to include provider-based matching to help customers match, link, and enhance records with minimal data movement. With data service providers, we have removed the need for customers to build bespoke integrations. +* `github.com/aws/aws-sdk-go-v2/service/managedblockchainquery`: [v1.2.0](service/managedblockchainquery/CHANGELOG.md#v120-2023-10-16) + * **Feature**: This release introduces two new APIs: GetAssetContract and ListAssetContracts. This release also adds support for Bitcoin Testnet. +* `github.com/aws/aws-sdk-go-v2/service/mediapackagev2`: [v1.3.0](service/mediapackagev2/CHANGELOG.md#v130-2023-10-16) + * **Feature**: This release allows customers to manage MediaPackage v2 resource using CloudFormation. +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.20.0](service/opensearch/CHANGELOG.md#v1200-2023-10-16) + * **Feature**: This release allows customers to list and associate optional plugin packages with compatible Amazon OpenSearch Service clusters for enhanced functionality. +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.30.0](service/redshift/CHANGELOG.md#v1300-2023-10-16) + * **Feature**: Added support for managing credentials of provisioned cluster admin using AWS Secrets Manager. +* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.7.0](service/redshiftserverless/CHANGELOG.md#v170-2023-10-16) + * **Feature**: Added support for managing credentials of serverless namespace admin using AWS Secrets Manager. +* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.21.0](service/sesv2/CHANGELOG.md#v1210-2023-10-16) + * **Feature**: This release provides enhanced visibility into your SES identity verification status. This will offer you more actionable insights, enabling you to promptly address any verification-related issues. +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.34.2](service/transfer/CHANGELOG.md#v1342-2023-10-16) + * **Documentation**: Documentation updates for AWS Transfer Family +* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.19.0](service/xray/CHANGELOG.md#v1190-2023-10-16) + * **Feature**: This release enhances GetTraceSummaries API to support new TimeRangeType Service to query trace summaries by segment end time. + +# Release (2023-10-12) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2`: v1.21.2 + * **Bug Fix**: Improve recognition of retryable DNS errors. +* `github.com/aws/aws-sdk-go-v2/config`: [v1.18.45](config/CHANGELOG.md#v11845-2023-10-12) + * **Bug Fix**: Fail to load config if an explicitly provided profile doesn't exist. +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.27.0](service/auditmanager/CHANGELOG.md#v1270-2023-10-12) + * **Feature**: This release introduces a new limit to the awsAccounts parameter.
When you create or update an assessment, there is now a limit of 200 AWS accounts that can be specified in the assessment scope. +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.31.0](service/autoscaling/CHANGELOG.md#v1310-2023-10-12) + * **Feature**: Update the NotificationMetadata field to only allow visible ascii characters. Add paginators to DescribeInstanceRefreshes, DescribeLoadBalancers, and DescribeLoadBalancerTargetGroups +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.37.0](service/configservice/CHANGELOG.md#v1370-2023-10-12) + * **Feature**: Add enums for resource types supported by Config +* `github.com/aws/aws-sdk-go-v2/service/controltower`: [v1.4.0](service/controltower/CHANGELOG.md#v140-2023-10-12) + * **Feature**: Added new EnabledControl resource details to ListEnabledControls API and added new GetEnabledControl API. +* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.29.0](service/customerprofiles/CHANGELOG.md#v1290-2023-10-12) + * **Feature**: Adds sensitive trait to various shapes in Customer Profiles Calculated Attribute API model. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.125.0](service/ec2/CHANGELOG.md#v11250-2023-10-12) + * **Feature**: This release adds Ubuntu Pro as a supported platform for On-Demand Capacity Reservations and adds support for setting an Amazon Machine Image (AMI) to disabled state. Disabling the AMI makes it private if it was previously shared, and prevents new EC2 instance launches from it. +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.21.6](service/elasticloadbalancingv2/CHANGELOG.md#v1216-2023-10-12) + * **Documentation**: This release enables routing policies with Availability Zone affinity for Network Load Balancers. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.63.0](service/glue/CHANGELOG.md#v1630-2023-10-12) + * **Feature**: Extending version control support to GitLab and Bitbucket from AWSGlue +* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.17.0](service/inspector2/CHANGELOG.md#v1170-2023-10-12) + * **Feature**: Add MacOs ec2 platform support +* `github.com/aws/aws-sdk-go-v2/service/ivsrealtime`: [v1.5.0](service/ivsrealtime/CHANGELOG.md#v150-2023-10-12) + * **Feature**: Update GetParticipant to return additional metadata. +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.40.0](service/lambda/CHANGELOG.md#v1400-2023-10-12) + * **Feature**: Adds support for Lambda functions to access Dual-Stack subnets over IPv6, via an opt-in flag in CreateFunction and UpdateFunctionConfiguration APIs +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.28.0](service/location/CHANGELOG.md#v1280-2023-10-12) + * **Feature**: This release adds endpoint updates for all AWS Location resource operations. +* `github.com/aws/aws-sdk-go-v2/service/machinelearning`: [v1.18.0](service/machinelearning/CHANGELOG.md#v1180-2023-10-12) + * **Feature**: This release marks Password field as sensitive +* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.21.9](service/pricing/CHANGELOG.md#v1219-2023-10-12) + * **Documentation**: Documentation updates for Price List +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.56.0](service/rds/CHANGELOG.md#v1560-2023-10-12) + * **Feature**: This release adds support for adding a dedicated log volume to open-source RDS instances. +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.31.0](service/rekognition/CHANGELOG.md#v1310-2023-10-12) + * **Feature**: Amazon Rekognition introduces support for Custom Moderation. 
This allows the enhancement of accuracy for detect moderation labels operations by creating custom adapters tuned on customer data. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.111.0](service/sagemaker/CHANGELOG.md#v11110-2023-10-12) + * **Feature**: Amazon SageMaker Canvas adds KendraSettings and DirectDeploySettings support for CanvasAppSettings +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.25.0](service/textract/CHANGELOG.md#v1250-2023-10-12) + * **Feature**: This release adds 9 new APIs for adapter and adapter version management, 3 new APIs for tagging, and updates AnalyzeDocument and StartDocumentAnalysis API parameters for using adapters. +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.29.0](service/transcribe/CHANGELOG.md#v1290-2023-10-12) + * **Feature**: This release is to enable m4a format to customers +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.31.2](service/workspaces/CHANGELOG.md#v1312-2023-10-12) + * **Documentation**: Updated the CreateWorkspaces action documentation to clarify that the PCoIP protocol is only available for Windows bundles. + +# Release (2023-10-06) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.124.0](service/ec2/CHANGELOG.md#v11240-2023-10-06) + * **Feature**: Documentation updates for Elastic Compute Cloud (EC2). +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.33.0](service/fsx/CHANGELOG.md#v1330-2023-10-06) + * **Feature**: After performing steps to repair the Active Directory configuration of a file system, use this action to initiate the process of attempting to recover to the file system. +* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.18.0](service/marketplacecatalog/CHANGELOG.md#v1180-2023-10-06) + * **Feature**: This release adds support for Document type as an alternative for stringified JSON for StartChangeSet, DescribeChangeSet and DescribeEntity APIs +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.45.0](service/quicksight/CHANGELOG.md#v1450-2023-10-06) + * **Feature**: NullOption in FilterListConfiguration; Dataset schema/table max length increased; Support total placement for pivot table visual; Lenient mode relaxes the validation to create resources with definition; Data sources can be added to folders; Redshift data sources support IAM Role-based authentication +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.34.0](service/transfer/CHANGELOG.md#v1340-2023-10-06) + * **Feature**: This release updates the max character limit of PreAuthenticationLoginBanner and PostAuthenticationLoginBanner to 4096 characters + +# Release (2023-10-05) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.10.0](service/omics/CHANGELOG.md#v1100-2023-10-05) + * **Feature**: Add Etag Support for Omics Storage in ListReadSets and GetReadSetMetadata API +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.55.1](service/rds/CHANGELOG.md#v1551-2023-10-05) + * **Documentation**: Updates Amazon RDS documentation for corrections and minor improvements. +* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.30.0](service/route53/CHANGELOG.md#v1300-2023-10-05) + * **Feature**: Add hostedzonetype filter to ListHostedZones API. 
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.37.0](service/securityhub/CHANGELOG.md#v1370-2023-10-05) + * **Feature**: Added new resource detail objects to ASFF, including resources for AwsEventsEventbus, AwsEventsEndpoint, AwsDmsEndpoint, AwsDmsReplicationTask, AwsDmsReplicationInstance, AwsRoute53HostedZone, and AwsMskCluster +* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.21.0](service/storagegateway/CHANGELOG.md#v1210-2023-10-05) + * **Feature**: Add SoftwareVersion to response of DescribeGatewayInformation. +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.31.0](service/workspaces/CHANGELOG.md#v1310-2023-10-05) + * **Feature**: This release introduces Manage applications. This feature allows users to manage their WorkSpaces applications by associating or disassociating their WorkSpaces with applications. The DescribeWorkspaces API will now additionally return OperatingSystemName in its responses. + +# Release (2023-10-04) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.21.0](service/appconfig/CHANGELOG.md#v1210-2023-10-04) + * **Feature**: AWS AppConfig introduces KMS customer-managed key (CMK) encryption support for data saved to AppConfig's hosted configuration store. +* `github.com/aws/aws-sdk-go-v2/service/datazone`: [v1.0.0](service/datazone/CHANGELOG.md#v100-2023-10-04) + * **Release**: New AWS service client module + * **Feature**: Initial release of Amazon DataZone +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.28.0](service/mediatailor/CHANGELOG.md#v1280-2023-10-04) + * **Feature**: Updates DescribeVodSource to include a list of ad break opportunities in the response +* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.21.0](service/mgn/CHANGELOG.md#v1210-2023-10-04) + * **Feature**: This release includes the following new APIs: ListConnectors, CreateConnector, UpdateConnector, DeleteConnector and UpdateSourceServer to support the source action framework feature. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.110.0](service/sagemaker/CHANGELOG.md#v11100-2023-10-04) + * **Feature**: Adding support for AdditionalS3DataSource, a data source used for training or inference that is in addition to the input dataset or model data. + +# Release (2023-10-03) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.69.0](service/connect/CHANGELOG.md#v1690-2023-10-03) + * **Feature**: GetMetricDataV2 API: Update to include new metrics CONTACTS_RESOLVED_IN_X , AVG_HOLD_TIME_ALL_CONTACTS , AVG_RESOLUTION_TIME , ABANDONMENT_RATE , AGENT_NON_RESPONSE_WITHOUT_CUSTOMER_ABANDONS with added features: Interval Period, TimeZone, Negate MetricFilters, Extended date time range. +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.27.0](service/location/CHANGELOG.md#v1270-2023-10-03) + * **Feature**: Amazon Location Service adds support for bounding polygon queries. Additionally, the GeofenceCount field has been added to the DescribeGeofenceCollection API response. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.43.0](service/mediaconvert/CHANGELOG.md#v1430-2023-10-03) + * **Feature**: This release adds the ability to replace video frames without modifying the audio essence. +* `github.com/aws/aws-sdk-go-v2/service/oam`: [v1.4.0](service/oam/CHANGELOG.md#v140-2023-10-03) + * **Feature**: This release adds support for sharing AWS::ApplicationInsights::Application resources. 
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.109.0](service/sagemaker/CHANGELOG.md#v11090-2023-10-03) + * **Feature**: This release allows users to run Selective Execution in SageMaker Pipelines without SourcePipelineExecutionArn if selected steps do not have any dependent steps. +* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.23.0](service/wellarchitected/CHANGELOG.md#v1230-2023-10-03) + * **Feature**: AWS Well-Architected now supports Review Templates that allows you to create templates with pre-filled answers for Well-Architected and Custom Lens best practices. + +# Release (2023-10-02) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/bedrock`: [v1.1.0](service/bedrock/CHANGELOG.md#v110-2023-10-02) + * **Feature**: Provisioned throughput feature with Amazon and third-party base models, and update validators for model identifier and taggable resource ARNs. +* `github.com/aws/aws-sdk-go-v2/service/bedrockruntime`: [v1.1.0](service/bedrockruntime/CHANGELOG.md#v110-2023-10-02) + * **Feature**: Add model timeout exception for InvokeModelWithResponseStream API and update validator for invoke model identifier. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.123.0](service/ec2/CHANGELOG.md#v11230-2023-10-02) + * **Feature**: Introducing Amazon EC2 R7iz instances with 3.9 GHz sustained all-core turbo frequency and deliver up to 20% better performance than previous generation z1d instances. +* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.16.6](service/managedblockchain/CHANGELOG.md#v1166-2023-10-02) + * **Documentation**: Remove Rinkeby as option from Ethereum APIs +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.55.0](service/rds/CHANGELOG.md#v1550-2023-10-02) + * **Feature**: Adds DefaultCertificateForNewLaunches field in the DescribeCertificates API response. +* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.15.0](service/sso/CHANGELOG.md#v1150-2023-10-02) + * **Feature**: Fix FIPS Endpoints in aws-us-gov. +* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.23.0](service/sts/CHANGELOG.md#v1230-2023-10-02) + * **Feature**: STS API updates for assumeRole +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.33.9](service/transfer/CHANGELOG.md#v1339-2023-10-02) + * **Documentation**: Documentation updates for AWS Transfer Family + +# Release (2023-09-28) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/bedrock`: [v1.0.0](service/bedrock/CHANGELOG.md#v100-2023-09-28) + * **Release**: New AWS service client module + * **Feature**: Model Invocation logging added to enable or disable logs in customer account. Model listing and description support added. Provisioned Throughput feature added. Custom model support added for creating custom models. Also includes list, and delete functions for custom model. +* `github.com/aws/aws-sdk-go-v2/service/bedrockruntime`: [v1.0.0](service/bedrockruntime/CHANGELOG.md#v100-2023-09-28) + * **Release**: New AWS service client module + * **Feature**: Run Inference: Added support to run the inference on models. Includes set of APIs for running inference in streaming and non-streaming mode. +* `github.com/aws/aws-sdk-go-v2/service/budgets`: [v1.17.0](service/budgets/CHANGELOG.md#v1170-2023-09-28) + * **Feature**: Update DescribeBudgets and DescribeBudgetNotificationsForAccount MaxResults limit to 1000. 
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.122.0](service/ec2/CHANGELOG.md#v11220-2023-09-28) + * **Feature**: Adds support for Customer Managed Key encryption for Amazon Verified Access resources +* `github.com/aws/aws-sdk-go-v2/service/iotfleetwise`: [v1.6.0](service/iotfleetwise/CHANGELOG.md#v160-2023-09-28) + * **Feature**: AWS IoT FleetWise now supports encryption through a customer managed AWS KMS key. The PutEncryptionConfiguration and GetEncryptionConfiguration APIs were added. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.108.0](service/sagemaker/CHANGELOG.md#v11080-2023-09-28) + * **Feature**: Online store feature groups supports Standard and InMemory tier storage types for low latency storage for real-time data retrieval. The InMemory tier supports collection types List, Set, and Vector. +* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.18.0](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v1180-2023-09-28) + * **Feature**: Feature Store supports read/write of records with collection type features. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.39.1](service/wafv2/CHANGELOG.md#v1391-2023-09-28) + * **Documentation**: Correct and improve the documentation for the FieldToMatch option JA3 fingerprint. + +# Release (2023-09-27) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.27.0](service/cognitoidentityprovider/CHANGELOG.md#v1270-2023-09-27) + * **Feature**: The UserPoolType Status field is no longer used. +* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.19.0](service/firehose/CHANGELOG.md#v1190-2023-09-27) + * **Feature**: Features : Adding support for new data ingestion source to Kinesis Firehose - AWS Managed Services Kafka. +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.40.0](service/iot/CHANGELOG.md#v1400-2023-09-27) + * **Feature**: Added support for IoT Rules Engine Kafka Action Headers +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.24.0](service/textract/CHANGELOG.md#v1240-2023-09-27) + * **Feature**: This release adds new feature - Layout to Analyze Document API which can automatically extract layout elements such as titles, paragraphs, headers, section headers, lists, page numbers, footers, table areas, key-value areas and figure areas and order the elements as a human would read. + +# Release (2023-09-26) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.18.0](service/appintegrations/CHANGELOG.md#v1180-2023-09-26) + * **Feature**: The Amazon AppIntegrations service adds a set of APIs (in preview) to manage third party applications to be used in Amazon Connect agent workspace. +* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.21.0](service/apprunner/CHANGELOG.md#v1210-2023-09-26) + * **Feature**: This release allows an App Runner customer to specify a custom source directory to run the build & start command. This change allows App Runner to support monorepo based repositories +* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.18.1](service/codedeploy/CHANGELOG.md#v1181-2023-09-26) + * **Documentation**: CodeDeploy now supports In-place and Blue/Green EC2 deployments with multiple Classic Load Balancers and multiple Target Groups. 
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.68.0](service/connect/CHANGELOG.md#v1680-2023-09-26) + * **Feature**: This release updates a set of Amazon Connect APIs that provide the ability to integrate third party applications in the Amazon Connect agent workspace. +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.22.0](service/dynamodb/CHANGELOG.md#v1220-2023-09-26) + * **Feature**: Amazon DynamoDB now supports Incremental Export as an enhancement to the existing Export Table +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.121.0](service/ec2/CHANGELOG.md#v11210-2023-09-26) + * **Feature**: The release includes AWS verified access to support FIPS compliance in North America regions +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.24.0](service/lakeformation/CHANGELOG.md#v1240-2023-09-26) + * **Feature**: This release adds support for three new APIs: "CreateLakeFormationOptIn", "DeleteLakeFormationOptIn" and "ListLakeFormationOptIns", and also updates the corresponding documentation. +* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.22.6](service/pinpoint/CHANGELOG.md#v1226-2023-09-26) + * **Documentation**: Update documentation for RemoveAttributes to more accurately reflect its behavior when attributes are deleted. +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.40.0](service/s3/CHANGELOG.md#v1400-2023-09-26) + * **Feature**: This release adds a new field COMPLETED to the ReplicationStatus Enum. You can now use this field to validate the replication status of S3 objects using the AWS SDK. + +# Release (2023-09-25) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.13.0](service/amplifyuibuilder/CHANGELOG.md#v1130-2023-09-25) + * **Feature**: Support for generating code that is compatible with future versions of amplify project dependencies. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.9.0](service/chimesdkmediapipelines/CHANGELOG.md#v190-2023-09-25) + * **Feature**: Adds support for sending WebRTC audio to Amazon Kinesis Video Streams. +* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.11.0](service/emrserverless/CHANGELOG.md#v1110-2023-09-25) + * **Feature**: This release adds support for application-wide default job configurations. +* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.17.0](service/finspacedata/CHANGELOG.md#v1170-2023-09-25) + * **Feature**: Adding sensitive trait to attributes. Change max SessionDuration from 720 to 60. Correct "ApiAccess" attribute to "apiAccess" to maintain consistency between APIs. +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.44.0](service/quicksight/CHANGELOG.md#v1440-2023-09-25) + * **Feature**: Added ability to tag users upon creation. +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.38.0](service/ssm/CHANGELOG.md#v1380-2023-09-25) + * **Feature**: This release updates the enum values for ResourceType in SSM DescribeInstanceInformation input and ConnectionStatus in GetConnectionStatus output. +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.39.0](service/wafv2/CHANGELOG.md#v1390-2023-09-25) + * **Feature**: You can now perform an exact match against the web request's JA3 fingerprint.
+ +# Release (2023-09-22) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/config`: [v1.18.42](config/CHANGELOG.md#v11842-2023-09-22) + * **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0. + * **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats. +* `github.com/aws/aws-sdk-go-v2/internal/ini`: [v1.3.43](internal/ini/CHANGELOG.md#v1343-2023-09-22) + * **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0. + * **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats. +* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.20.0](service/braket/CHANGELOG.md#v1200-2023-09-22) + * **Feature**: This release adds support to view the device queue depth (the number of queued quantum tasks and hybrid jobs on a device) and queue position for a quantum task and hybrid job. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.18.0](service/cloudwatchevents/CHANGELOG.md#v1180-2023-09-22) + * **Feature**: Adds sensitive trait to various shapes in Jetstream Connections API model. +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.31.0](service/databasemigrationservice/CHANGELOG.md#v1310-2023-09-22) + * **Feature**: new vendors for DMS CSF: MongoDB, MariaDB, DocumentDb and Redshift +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.120.0](service/ec2/CHANGELOG.md#v11200-2023-09-22) + * **Feature**: EC2 M2 Pro Mac instances are powered by Apple M2 Pro Mac Mini computers featuring 12 core CPU, 19 core GPU, 32 GiB of memory, and 16 core Apple Neural Engine and uniquely enabled by the AWS Nitro System through high-speed Thunderbolt connections. +* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.21.7](service/efs/CHANGELOG.md#v1217-2023-09-22) + * **Documentation**: Documentation updates for Elastic File System +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.28.0](service/guardduty/CHANGELOG.md#v1280-2023-09-22) + * **Feature**: Add `EKS_CLUSTER_NAME` to filter and sort key. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.42.0](service/mediaconvert/CHANGELOG.md#v1420-2023-09-22) + * **Feature**: This release supports the creation of audio-only tracks in CMAF output groups. + +# Release (2023-09-20) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.20.0](service/appconfig/CHANGELOG.md#v1200-2023-09-20) + * **Feature**: Enabling boto3 paginators for list APIs and adding documentation around ServiceQuotaExceededException errors +* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.20.0](service/apprunner/CHANGELOG.md#v1200-2023-09-20) + * **Feature**: This release adds improvements for managing App Runner auto scaling configuration resources. New APIs: UpdateDefaultAutoScalingConfiguration and ListServicesForAutoScalingConfiguration. Updated API: DeleteAutoScalingConfiguration.
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.24.0](service/cloudwatchlogs/CHANGELOG.md#v1240-2023-09-20) + * **Feature**: Add ClientToken to QueryDefinition CFN Handler in CWL +* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.20.0](service/codeartifact/CHANGELOG.md#v1200-2023-09-20) + * **Feature**: Add support for the Swift package format. +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.18.4](service/kinesisvideo/CHANGELOG.md#v1184-2023-09-20) + * **Documentation**: Updated DescribeMediaStorageConfiguration, StartEdgeConfigurationUpdate, ImageGenerationConfiguration$SamplingInterval, and UpdateMediaStorageConfiguration to match AWS Docs. +* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.39.0](service/s3/CHANGELOG.md#v1390-2023-09-20) + * **Feature**: Fix an issue where the SDK can fail to unmarshall response due to NumberFormatException +* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.24.0](service/servicediscovery/CHANGELOG.md#v1240-2023-09-20) + * **Feature**: Adds a new DiscoverInstancesRevision API and also adds InstanceRevision field to the DiscoverInstances API response. +* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.17.0](service/ssooidc/CHANGELOG.md#v1170-2023-09-20) + * **Feature**: Update FIPS endpoints in aws-us-gov. + +# Release (2023-09-19) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.119.0](service/ec2/CHANGELOG.md#v11190-2023-09-19) + * **Feature**: This release adds support for C7i, and R7a instance types. +* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.30.0](service/outposts/CHANGELOG.md#v1300-2023-09-19) + * **Feature**: This release adds the InstanceFamilies field to the ListAssets response. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.107.0](service/sagemaker/CHANGELOG.md#v11070-2023-09-19) + * **Feature**: This release adds support for one-time model monitoring schedules that are executed immediately without delay, explicit data analysis windows for model monitoring schedules and exclude features attributes to remove features from model monitor analysis. + +# Release (2023-09-18) + +## General Highlights +* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field. +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.21.0](service/accessanalyzer/CHANGELOG.md#v1210-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.19.0](service/acm/CHANGELOG.md#v1190-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.15.0](service/amplify/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/apigatewaymanagementapi`: [v1.13.0](service/apigatewaymanagementapi/CHANGELOG.md#v1130-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
+* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.19.0](service/appconfig/CHANGELOG.md#v1190-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/appconfigdata`: [v1.8.0](service/appconfigdata/CHANGELOG.md#v180-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/appfabric`: [v1.2.0](service/appfabric/CHANGELOG.md#v120-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.17.0](service/appintegrations/CHANGELOG.md#v1170-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/applicationcostprofiler`: [v1.12.0](service/applicationcostprofiler/CHANGELOG.md#v1120-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.19.0](service/appmesh/CHANGELOG.md#v1190-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/arczonalshift`: [v1.3.0](service/arczonalshift/CHANGELOG.md#v130-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/autoscalingplans`: [v1.15.0](service/autoscalingplans/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.11.0](service/backupgateway/CHANGELOG.md#v1110-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/backupstorage`: [v1.3.0](service/backupstorage/CHANGELOG.md#v130-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.19.0](service/braket/CHANGELOG.md#v1190-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.9.0](service/chimesdkvoice/CHANGELOG.md#v190-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/clouddirectory`: [v1.15.0](service/clouddirectory/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/cloudhsmv2`: [v1.16.0](service/cloudhsmv2/CHANGELOG.md#v1160-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.16.0](service/cloudsearch/CHANGELOG.md#v1160-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
+* `github.com/aws/aws-sdk-go-v2/service/cloudsearchdomain`: [v1.14.0](service/cloudsearchdomain/CHANGELOG.md#v1140-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/cloudtraildata`: [v1.2.0](service/cloudtraildata/CHANGELOG.md#v120-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.22.0](service/codebuild/CHANGELOG.md#v1220-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.18.0](service/codedeploy/CHANGELOG.md#v1180-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.15.0](service/codeguruprofiler/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.19.0](service/codegurureviewer/CHANGELOG.md#v1190-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/codegurusecurity`: [v1.2.0](service/codegurusecurity/CHANGELOG.md#v120-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/codestar`: [v1.15.0](service/codestar/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.16.0](service/codestarnotifications/CHANGELOG.md#v1160-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.17.0](service/cognitoidentity/CHANGELOG.md#v1170-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/cognitosync`: [v1.14.0](service/cognitosync/CHANGELOG.md#v1140-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.7.0](service/connectcases/CHANGELOG.md#v170-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/connectcontactlens`: [v1.15.0](service/connectcontactlens/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/controltower`: [v1.3.0](service/controltower/CHANGELOG.md#v130-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.23.0](service/databrew/CHANGELOG.md#v1230-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
+* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.21.0](service/dataexchange/CHANGELOG.md#v1210-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/datapipeline`: [v1.16.0](service/datapipeline/CHANGELOG.md#v1160-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/dax`: [v1.14.0](service/dax/CHANGELOG.md#v1140-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/devicefarm`: [v1.17.0](service/devicefarm/CHANGELOG.md#v1170-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/docdbelastic`: [v1.3.0](service/docdbelastic/CHANGELOG.md#v130-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect`: [v1.17.0](service/ec2instanceconnect/CHANGELOG.md#v1170-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.18.0](service/ecrpublic/CHANGELOG.md#v1180-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk`: [v1.17.0](service/elasticbeanstalk/CHANGELOG.md#v1170-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.17.0](service/elasticloadbalancing/CHANGELOG.md#v1170-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/elastictranscoder`: [v1.16.0](service/elastictranscoder/CHANGELOG.md#v1160-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.13.0](service/evidently/CHANGELOG.md#v1130-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.16.0](service/finspacedata/CHANGELOG.md#v1160-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.16.0](service/fis/CHANGELOG.md#v1160-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.27.0](service/forecast/CHANGELOG.md#v1270-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/forecastquery`: [v1.15.0](service/forecastquery/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
+* `github.com/aws/aws-sdk-go-v2/service/gamesparks`: [v1.4.0](service/gamesparks/CHANGELOG.md#v140-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/glacier`: [v1.16.0](service/glacier/CHANGELOG.md#v1160-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.17.0](service/greengrass/CHANGELOG.md#v1170-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.24.0](service/greengrassv2/CHANGELOG.md#v1240-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/honeycode`: [v1.15.0](service/honeycode/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/inspector`: [v1.15.0](service/inspector/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/iot1clickdevicesservice`: [v1.13.0](service/iot1clickdevicesservice/CHANGELOG.md#v1130-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/iot1clickprojects`: [v1.14.0](service/iot1clickprojects/CHANGELOG.md#v1140-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.16.0](service/iotanalytics/CHANGELOG.md#v1160-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.17.0](service/iotevents/CHANGELOG.md#v1170-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.15.0](service/ioteventsdata/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/iotfleethub`: [v1.15.0](service/iotfleethub/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/iotjobsdataplane`: [v1.14.0](service/iotjobsdataplane/CHANGELOG.md#v1140-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/iotroborunner`: [v1.3.0](service/iotroborunner/CHANGELOG.md#v130-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.17.0](service/iotsecuretunneling/CHANGELOG.md#v1170-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
+* `github.com/aws/aws-sdk-go-v2/service/iotthingsgraph`: [v1.16.0](service/iotthingsgraph/CHANGELOG.md#v1160-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.6.0](service/ivschat/CHANGELOG.md#v160-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/kendraranking`: [v1.2.0](service/kendraranking/CHANGELOG.md#v120-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.19.0](service/kinesis/CHANGELOG.md#v1190-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.16.0](service/kinesisanalytics/CHANGELOG.md#v1160-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia`: [v1.17.0](service/kinesisvideoarchivedmedia/CHANGELOG.md#v1170-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideomedia`: [v1.13.0](service/kinesisvideomedia/CHANGELOG.md#v1130-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideosignaling`: [v1.13.0](service/kinesisvideosignaling/CHANGELOG.md#v1130-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/kinesisvideowebrtcstorage`: [v1.4.0](service/kinesisvideowebrtcstorage/CHANGELOG.md#v140-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.19.0](service/lexmodelbuildingservice/CHANGELOG.md#v1190-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/lexruntimeservice`: [v1.15.0](service/lexruntimeservice/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.19.0](service/lexruntimev2/CHANGELOG.md#v1190-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.20.0](service/licensemanager/CHANGELOG.md#v1200-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/licensemanagerlinuxsubscriptions`: [v1.3.0](service/licensemanagerlinuxsubscriptions/CHANGELOG.md#v130-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
+* `github.com/aws/aws-sdk-go-v2/service/licensemanagerusersubscriptions`: [v1.4.0](service/licensemanagerusersubscriptions/CHANGELOG.md#v140-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.21.0](service/lookoutmetrics/CHANGELOG.md#v1210-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.17.0](service/lookoutvision/CHANGELOG.md#v1170-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/machinelearning`: [v1.17.0](service/machinelearning/CHANGELOG.md#v1170-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.17.0](service/macie/CHANGELOG.md#v1170-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.29.7](service/macie2/CHANGELOG.md#v1297-2023-09-18) + * **Documentation**: This release changes the default managedDataIdentifierSelector setting for new classification jobs to RECOMMENDED. By default, new classification jobs now use the recommended set of managed data identifiers. +* `github.com/aws/aws-sdk-go-v2/service/marketplacecommerceanalytics`: [v1.14.0](service/marketplacecommerceanalytics/CHANGELOG.md#v1140-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/marketplaceentitlementservice`: [v1.14.0](service/marketplaceentitlementservice/CHANGELOG.md#v1140-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/marketplacemetering`: [v1.16.0](service/marketplacemetering/CHANGELOG.md#v1160-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/mediapackagev2`: [v1.2.0](service/mediapackagev2/CHANGELOG.md#v120-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.24.0](service/mediapackagevod/CHANGELOG.md#v1240-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/mediastore`: [v1.15.0](service/mediastore/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/mediastoredata`: [v1.15.0](service/mediastoredata/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/medicalimaging`: [v1.2.0](service/medicalimaging/CHANGELOG.md#v120-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
+* `github.com/aws/aws-sdk-go-v2/service/migrationhub`: [v1.15.0](service/migrationhub/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/migrationhubconfig`: [v1.15.0](service/migrationhubconfig/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/migrationhuborchestrator`: [v1.3.0](service/migrationhuborchestrator/CHANGELOG.md#v130-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.11.0](service/migrationhubstrategy/CHANGELOG.md#v1110-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/mobile`: [v1.14.0](service/mobile/CHANGELOG.md#v1140-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/mturk`: [v1.16.0](service/mturk/CHANGELOG.md#v1160-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.22.0](service/neptune/CHANGELOG.md#v1220-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.19.0](service/networkmanager/CHANGELOG.md#v1190-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.18.0](service/nimble/CHANGELOG.md#v1180-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/oam`: [v1.3.0](service/oam/CHANGELOG.md#v130-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/opensearchserverless`: [v1.5.0](service/opensearchserverless/CHANGELOG.md#v150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/opsworks`: [v1.16.0](service/opsworks/CHANGELOG.md#v1160-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/opsworkscm`: [v1.17.0](service/opsworkscm/CHANGELOG.md#v1170-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/osis`: [v1.2.0](service/osis/CHANGELOG.md#v120-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.13.0](service/panorama/CHANGELOG.md#v1130-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
+* `github.com/aws/aws-sdk-go-v2/service/paymentcryptography`: [v1.2.0](service/paymentcryptography/CHANGELOG.md#v120-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/personalizeevents`: [v1.15.0](service/personalizeevents/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/personalizeruntime`: [v1.15.0](service/personalizeruntime/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/pinpointemail`: [v1.14.0](service/pinpointemail/CHANGELOG.md#v1140-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoice`: [v1.13.0](service/pinpointsmsvoice/CHANGELOG.md#v1130-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2`: [v1.3.0](service/pinpointsmsvoicev2/CHANGELOG.md#v130-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/pipes`: [v1.4.0](service/pipes/CHANGELOG.md#v140-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/qldbsession`: [v1.16.0](service/qldbsession/CHANGELOG.md#v1160-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.10.0](service/rbin/CHANGELOG.md#v1100-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.15.0](service/rdsdata/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.6.0](service/redshiftserverless/CHANGELOG.md#v160-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.4.0](service/resourceexplorer2/CHANGELOG.md#v140-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.16.0](service/resourcegroups/CHANGELOG.md#v1160-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.16.0](service/resourcegroupstaggingapi/CHANGELOG.md#v1160-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.20.0](service/robomaker/CHANGELOG.md#v1200-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
+* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.13.0](service/route53recoverycluster/CHANGELOG.md#v1130-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.13.0](service/route53recoverycontrolconfig/CHANGELOG.md#v1130-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness`: [v1.11.0](service/route53recoveryreadiness/CHANGELOG.md#v1110-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.20.0](service/route53resolver/CHANGELOG.md#v1200-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.12.0](service/rum/CHANGELOG.md#v1120-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.18.0](service/s3outposts/CHANGELOG.md#v1180-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.17.0](service/sagemakera2iruntime/CHANGELOG.md#v1170-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/sagemakeredge`: [v1.15.0](service/sagemakeredge/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/sagemakergeospatial`: [v1.5.0](service/sagemakergeospatial/CHANGELOG.md#v150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/sagemakermetrics`: [v1.2.0](service/sagemakermetrics/CHANGELOG.md#v120-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/scheduler`: [v1.3.0](service/scheduler/CHANGELOG.md#v130-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/schemas`: [v1.17.0](service/schemas/CHANGELOG.md#v1170-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository`: [v1.14.0](service/serverlessapplicationrepository/CHANGELOG.md#v1140-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.19.0](service/servicecatalogappregistry/CHANGELOG.md#v1190-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
+* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.23.0](service/servicediscovery/CHANGELOG.md#v1230-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.20.0](service/shield/CHANGELOG.md#v1200-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/sms`: [v1.15.0](service/sms/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/snowdevicemanagement`: [v1.11.0](service/snowdevicemanagement/CHANGELOG.md#v1110-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.22.0](service/sns/CHANGELOG.md#v1220-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.17.0](service/ssmcontacts/CHANGELOG.md#v1170-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.23.0](service/ssmincidents/CHANGELOG.md#v1230-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.5.0](service/ssmsap/CHANGELOG.md#v150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.14.0](service/sso/CHANGELOG.md#v1140-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.16.0](service/ssooidc/CHANGELOG.md#v1160-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.20.0](service/storagegateway/CHANGELOG.md#v1200-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.22.0](service/sts/CHANGELOG.md#v1220-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/supportapp`: [v1.4.0](service/supportapp/CHANGELOG.md#v140-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.19.0](service/synthetics/CHANGELOG.md#v1190-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.23.0](service/textract/CHANGELOG.md#v1230-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. 
+* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.17.0](service/timestreamquery/CHANGELOG.md#v1170-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.19.0](service/timestreamwrite/CHANGELOG.md#v1190-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/tnb`: [v1.3.0](service/tnb/CHANGELOG.md#v130-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.11.0](service/transcribestreaming/CHANGELOG.md#v1110-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.15.0](service/voiceid/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/waf`: [v1.14.0](service/waf/CHANGELOG.md#v1140-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/wafregional`: [v1.15.0](service/wafregional/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.16.0](service/workdocs/CHANGELOG.md#v1160-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/worklink`: [v1.15.0](service/worklink/CHANGELOG.md#v1150-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.20.0](service/workmail/CHANGELOG.md#v1200-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. + * **Feature**: This release includes four new APIs UpdateUser, UpdateGroup, ListGroupsForEntity and DescribeEntity, along with RemoteUsers and some enhancements to existing APIs. +* `github.com/aws/aws-sdk-go-v2/service/workmailmessageflow`: [v1.14.0](service/workmailmessageflow/CHANGELOG.md#v1140-2023-09-18) + * **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. + +# Release (2023-09-15) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.24.0](service/appstream/CHANGELOG.md#v1240-2023-09-15) + * **Feature**: This release introduces app block builder, allowing customers to provision a resource to package applications into an app block +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.67.0](service/connect/CHANGELOG.md#v1670-2023-09-15) + * **Feature**: New rule type (OnMetricDataUpdate) has been added +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.29.1](service/datasync/CHANGELOG.md#v1291-2023-09-15) + * **Documentation**: Documentation-only updates for AWS DataSync. 
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.106.0](service/sagemaker/CHANGELOG.md#v11060-2023-09-15) + * **Feature**: This release introduces Skip Model Validation for Model Packages + +# Release (2023-09-14) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.23.0](service/appstream/CHANGELOG.md#v1230-2023-09-14) + * **Feature**: This release introduces multi-session fleets, allowing customers to provision more than one user session on a single fleet instance. +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.34.6](service/cloudformation/CHANGELOG.md#v1346-2023-09-14) + * **Documentation**: Documentation updates for AWS CloudFormation +* `github.com/aws/aws-sdk-go-v2/service/entityresolution`: [v1.2.0](service/entityresolution/CHANGELOG.md#v120-2023-09-14) + * **Feature**: Changed "ResolutionTechniques" and "MappedInputFields" in workflow and schema mapping operations to be required fields. +* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.19.0](service/lookoutequipment/CHANGELOG.md#v1190-2023-09-14) + * **Feature**: This release adds APIs for the new scheduled retraining feature. + +# Release (2023-09-13) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.18.8](service/cloud9/CHANGELOG.md#v1188-2023-09-13) + * **Documentation**: Update to include information on Ubuntu 18 deprecation. +* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.16.0](service/drs/CHANGELOG.md#v1160-2023-09-13) + * **Feature**: Updated existing APIs and added new ones to support using AWS Elastic Disaster Recovery post-launch actions. Added support for new regions. +* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.18.0](service/firehose/CHANGELOG.md#v1180-2023-09-13) + * **Feature**: DocumentIdOptions has been added for the Amazon OpenSearch destination. +* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.27.0](service/guardduty/CHANGELOG.md#v1270-2023-09-13) + * **Feature**: Add `managementType` field to ListCoverage API response. +* `github.com/aws/aws-sdk-go-v2/service/internetmonitor`: [v1.6.0](service/internetmonitor/CHANGELOG.md#v160-2023-09-13) + * **Feature**: This release updates the Amazon CloudWatch Internet Monitor API domain name. +* `github.com/aws/aws-sdk-go-v2/service/ivsrealtime`: [v1.4.4](service/ivsrealtime/CHANGELOG.md#v144-2023-09-13) + * **Documentation**: Doc only update that changes description for ParticipantToken. +* `github.com/aws/aws-sdk-go-v2/service/simspaceweaver`: [v1.5.1](service/simspaceweaver/CHANGELOG.md#v151-2023-09-13) + * **Documentation**: Edited the introductory text for the API reference. +* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.18.0](service/xray/CHANGELOG.md#v1180-2023-09-13) + * **Feature**: Add StartTime field in GetTraceSummaries API response for each TraceSummary. + +# Release (2023-09-12) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.118.0](service/ec2/CHANGELOG.md#v11180-2023-09-12) + * **Feature**: This release adds support for restricting public sharing of AMIs through AMI Block Public Access +* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.22.0](service/eventbridge/CHANGELOG.md#v1220-2023-09-12) + * **Feature**: Adds sensitive trait to various shapes in Jetstream Connections API model. +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.43.0](service/kendra/CHANGELOG.md#v1430-2023-09-12) + * **Feature**: Amazon Kendra now supports confidence score buckets for retrieved passage results using the Retrieve API. 
+ +# Release (2023-09-11) + ## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.20.0](service/ecr/CHANGELOG.md#v1200-2023-09-11) + * **Feature**: This release will have ValidationException be thrown from ECR LifecyclePolicy APIs in regions where LifecyclePolicy is not supported; this includes existing Amazon Dedicated Cloud (ADC) regions. This release will also change Tag: TagValue and Tag: TagKey to required. +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.37.0](service/medialive/CHANGELOG.md#v1370-2023-09-11) + * **Feature**: AWS Elemental Link now supports attaching a Link UHD device to a MediaConnect flow. +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.43.0](service/quicksight/CHANGELOG.md#v1430-2023-09-11) + * **Feature**: This release launches new updates to QuickSight KPI visuals - support for sparklines, new templated layout and new targets for conditional formatting rules. + +# Release (2023-09-08) + ## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.32.6](service/fsx/CHANGELOG.md#v1326-2023-09-08) + * **Documentation**: Amazon FSx documentation fixes +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.105.0](service/sagemaker/CHANGELOG.md#v11050-2023-09-08) + * **Feature**: Autopilot APIs will now support holiday featurization for Timeseries models. The models will now hold holiday metadata and should be able to accommodate the holiday effect during inference. +* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.18.0](service/ssoadmin/CHANGELOG.md#v1180-2023-09-08) + * **Feature**: Content updates to IAM Identity Center API for China Regions. +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.30.0](service/workspaces/CHANGELOG.md#v1300-2023-09-08) + * **Feature**: A new field "ErrorDetails" will be added to the output of the "DescribeWorkspaceImages" API call. This field provides in-depth details about errors that occurred during the image import process. These details include the possible causes of the errors and troubleshooting information. + +# Release (2023-09-07) + ## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.36.2](service/securityhub/CHANGELOG.md#v1362-2023-09-07) + * **Documentation**: Documentation updates for AWS Security Hub +* `github.com/aws/aws-sdk-go-v2/service/simspaceweaver`: [v1.5.0](service/simspaceweaver/CHANGELOG.md#v150-2023-09-07) + * **Feature**: BucketName and ObjectKey are now required for the S3Location data type. BucketName is now required for the S3Destination data type. + +# Release (2023-09-06) + ## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.35.0](service/appflow/CHANGELOG.md#v1350-2023-09-06) + * **Feature**: Adding OAuth2.0 support for servicenow connector. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.117.0](service/ec2/CHANGELOG.md#v11170-2023-09-06) + * **Feature**: This release adds 'outpost' location type to the DescribeInstanceTypeOfferings API, allowing customers that have been allowlisted for outpost to query their offerings in the API. +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.21.4](service/elasticloadbalancingv2/CHANGELOG.md#v1214-2023-09-06) + * **Documentation**: This release enables default UDP connection termination and disabling unhealthy target connection termination for Network Load Balancers.
+* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.36.0](service/medialive/CHANGELOG.md#v1360-2023-09-06) + * **Feature**: Adds advanced Output Locking options for Epoch Locking: Custom Epoch and Jam Sync Time +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.38.0](service/wafv2/CHANGELOG.md#v1380-2023-09-06) + * **Feature**: The targeted protection level of the Bot Control managed rule group now provides optional, machine-learning analysis of traffic statistics to detect some bot-related activity. You can enable or disable the machine learning functionality through the API. + +# Release (2023-09-05) + ## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + ## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.9.0](service/billingconductor/CHANGELOG.md#v190-2023-09-05) + * **Feature**: This release adds support for line item filtering for the custom line item resource. +* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.18.7](service/cloud9/CHANGELOG.md#v1187-2023-09-05) + * **Documentation**: Added support for Ubuntu 22.04 that was not picked up in a previous Trebuchet request. Doc-only update. +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.27.0](service/computeoptimizer/CHANGELOG.md#v1270-2023-09-05) + * **Feature**: This release adds support to provide recommendations for G4dn and P3 instances that use NVIDIA GPUs. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.116.0](service/ec2/CHANGELOG.md#v11160-2023-09-05) + * **Feature**: Introducing Amazon EC2 C7gd, M7gd, and R7gd Instances with up to 3.8 TB of local NVMe-based SSD block-level storage. These instances are powered by AWS Graviton3 processors, delivering up to 25% better performance over Graviton2-based instances. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.30.1](service/ecs/CHANGELOG.md#v1301-2023-09-05) + * **Documentation**: Documentation only update for Amazon ECS. +* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.21.0](service/eventbridge/CHANGELOG.md#v1210-2023-09-05) + * **Feature**: Improve Endpoint Ruleset test coverage. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.54.0](service/rds/CHANGELOG.md#v1540-2023-09-05) + * **Feature**: Add support for feature integration with AWS Backup. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.104.0](service/sagemaker/CHANGELOG.md#v11040-2023-09-05) + * **Feature**: SageMaker Neo now supports data input shape derivation for Pytorch 2.0 and XGBoost compilation job for cloud instance targets. You can skip the DataInputConfig field during compilation job creation. You can also access derived information from the model in the DescribeCompilationJob response. +* `github.com/aws/aws-sdk-go-v2/service/vpclattice`: [v1.2.0](service/vpclattice/CHANGELOG.md#v120-2023-09-05) + * **Feature**: This release adds Lambda event structure version config support for LAMBDA target groups. It also adds newline support for auth policies. + +# Release (2023-09-01) + ## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.8.0](service/chimesdkmediapipelines/CHANGELOG.md#v180-2023-09-01) + * **Feature**: This release adds support for the Voice Analytics feature for customer-owned KVS streams as part of the Amazon Chime SDK call analytics.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.66.0](service/connect/CHANGELOG.md#v1660-2023-09-01) + * **Feature**: Amazon Connect adds the ability to read, create, update, delete, and list view resources, and adds the ability to read, create, delete, and list view versions. +* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.18.0](service/identitystore/CHANGELOG.md#v1180-2023-09-01) + * **Feature**: New Identity Store content for China Region launch +* `github.com/aws/aws-sdk-go-v2/service/neptunedata`: [v1.0.1](service/neptunedata/CHANGELOG.md#v101-2023-09-01) + * **Documentation**: Removed the descriptive text in the introduction. + +# Release (2023-08-31) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.7.0](service/chimesdkmediapipelines/CHANGELOG.md#v170-2023-08-31) + * **Feature**: This release adds support for feature Voice Enhancement for Call Recording as part of Amazon Chime SDK call analytics. +* `github.com/aws/aws-sdk-go-v2/service/cloudhsm`: [v1.15.0](service/cloudhsm/CHANGELOG.md#v1150-2023-08-31) + * **Feature**: Deprecating CloudHSM Classic API Service. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.17.0](service/cloudwatchevents/CHANGELOG.md#v1170-2023-08-31) + * **Feature**: Documentation updates for CloudWatch Events. +* `github.com/aws/aws-sdk-go-v2/service/connectcampaigns`: [v1.4.0](service/connectcampaigns/CHANGELOG.md#v140-2023-08-31) + * **Feature**: Amazon Connect outbound campaigns has launched agentless dialing mode which enables customers to make automated outbound calls without agent engagement. This release updates three of the campaign management API's to support the new agentless dialing mode and the new dialing capacity field. +* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.17.0](service/connectparticipant/CHANGELOG.md#v1170-2023-08-31) + * **Feature**: Amazon Connect Participant Service adds the ability to get a view resource using a view token, which is provided in a participant message, with the release of the DescribeView API. +* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.28.0](service/customerprofiles/CHANGELOG.md#v1280-2023-08-31) + * **Feature**: Adds sensitive trait to various shapes in Customer Profiles API model. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.30.0](service/ecs/CHANGELOG.md#v1300-2023-08-31) + * **Feature**: This release adds support for an account-level setting that you can use to configure the number of days for AWS Fargate task retirement. +* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.19.0](service/health/CHANGELOG.md#v1190-2023-08-31) + * **Feature**: Adds new API DescribeEntityAggregatesForOrganization that retrieves entity aggregates across your organization. Also adds support for resource status filtering in DescribeAffectedEntitiesForOrganization, resource status aggregates in the DescribeEntityAggregates response, and new resource statuses. +* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.26.0](service/ivs/CHANGELOG.md#v1260-2023-08-31) + * **Feature**: Updated "type" description for CreateChannel, UpdateChannel, Channel, and ChannelSummary. +* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.11.0](service/kafkaconnect/CHANGELOG.md#v1110-2023-08-31) + * **Feature**: Minor model changes for Kafka Connect as well as endpoint updates. 
+* `github.com/aws/aws-sdk-go-v2/service/paymentcryptographydata`: [v1.2.0](service/paymentcryptographydata/CHANGELOG.md#v120-2023-08-31) + * **Feature**: Make KeyCheckValue field optional when using asymmetric keys as Key Check Values typically only apply to symmetric keys +* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.21.0](service/sagemakerruntime/CHANGELOG.md#v1210-2023-08-31) + * **Feature**: This release adds a new InvokeEndpointWithResponseStream API to support streaming of model responses. + +# Release (2023-08-30) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.34.0](service/appflow/CHANGELOG.md#v1340-2023-08-30) + * **Feature**: Add SAP source connector parallel and pagination feature +* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.19.0](service/apprunner/CHANGELOG.md#v1190-2023-08-30) + * **Feature**: App Runner adds support for Bitbucket. You can now create App Runner connection that connects to your Bitbucket repositories and deploy App Runner service with the source code stored in a Bitbucket repository. +* `github.com/aws/aws-sdk-go-v2/service/cleanrooms`: [v1.4.0](service/cleanrooms/CHANGELOG.md#v140-2023-08-30) + * **Feature**: This release decouples member abilities in a collaboration. With this change, the member who can run queries no longer needs to be the same as the member who can receive results. +* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.29.0](service/datasync/CHANGELOG.md#v1290-2023-08-30) + * **Feature**: AWS DataSync introduces Task Reports, a new feature that provides detailed reports of data transfer operations for each task execution. +* `github.com/aws/aws-sdk-go-v2/service/neptunedata`: [v1.0.0](service/neptunedata/CHANGELOG.md#v100-2023-08-30) + * **Release**: New AWS service client module + * **Feature**: Allows customers to execute data plane actions like bulk loading graphs, issuing graph queries using Gremlin and openCypher directly from the SDK. +* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.30.0](service/networkfirewall/CHANGELOG.md#v1300-2023-08-30) + * **Feature**: Network Firewall increasing pagination token string length +* `github.com/aws/aws-sdk-go-v2/service/pcaconnectorad`: [v1.0.0](service/pcaconnectorad/CHANGELOG.md#v100-2023-08-30) + * **Release**: New AWS service client module + * **Feature**: The Connector for AD allows you to use a fully-managed AWS Private CA as a drop-in replacement for your self-managed enterprise CAs without local agents or proxy servers. Enterprises that use AD to manage Windows environments can reduce their private certificate authority (CA) costs and complexity. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.103.0](service/sagemaker/CHANGELOG.md#v11030-2023-08-30) + * **Feature**: Amazon SageMaker Canvas adds IdentityProviderOAuthSettings support for CanvasAppSettings + +# Release (2023-08-29) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.26.0](service/cognitoidentityprovider/CHANGELOG.md#v1260-2023-08-29) + * **Feature**: Added API example requests and responses for several operations. Fixed the validation regex for user pools Identity Provider name. +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.32.5](service/fsx/CHANGELOG.md#v1325-2023-08-29) + * **Documentation**: Documentation updates for project quotas. +* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.9.0](service/omics/CHANGELOG.md#v190-2023-08-29) + * **Feature**: Add RetentionMode support for Runs. 
+* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.20.0](service/sesv2/CHANGELOG.md#v1200-2023-08-29) + * **Feature**: Adds support for the new Export and Message Insights features: create, get, list and cancel export jobs; get message insights. + +# Release (2023-08-28) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.25.0](service/backup/CHANGELOG.md#v1250-2023-08-28) + * **Feature**: Add support for customizing time zone for backup window in backup plan rules. +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.26.0](service/computeoptimizer/CHANGELOG.md#v1260-2023-08-28) + * **Feature**: This release enables AWS Compute Optimizer to analyze and generate licensing optimization recommendations for sql server running on EC2 instances. +* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.20.6](service/organizations/CHANGELOG.md#v1206-2023-08-28) + * **Documentation**: Documentation updates for permissions and links. +* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.7.0](service/securitylake/CHANGELOG.md#v170-2023-08-28) + * **Feature**: Remove incorrect regex enforcement on pagination tokens. +* `github.com/aws/aws-sdk-go-v2/service/servicequotas`: [v1.16.0](service/servicequotas/CHANGELOG.md#v1160-2023-08-28) + * **Feature**: Service Quotas now supports viewing the applied quota value and requesting a quota increase for a specific resource in an AWS account. +* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.12.0](service/workspacesweb/CHANGELOG.md#v1120-2023-08-28) + * **Feature**: WorkSpaces Web now enables Admins to configure which cookies are synchronized from an end-user's local browser to the in-session browser. In conjunction with a browser extension, this feature enables enhanced Single-Sign On capability by reducing the number of times an end-user has to authenticate. + +# Release (2023-08-25) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.29.0](service/cloudtrail/CHANGELOG.md#v1290-2023-08-25) + * **Feature**: Add ThrottlingException with error code 429 to handle CloudTrail Delegated Admin request rate exceeded on organization resources. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.27.7](service/cloudwatch/CHANGELOG.md#v1277-2023-08-25) + * **Documentation**: Doc-only update to get doc bug fixes into the SDK docs + +# Release (2023-08-24) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.115.0](service/ec2/CHANGELOG.md#v11150-2023-08-24) + * **Feature**: Amazon EC2 M7a instances, powered by 4th generation AMD EPYC processors, deliver up to 50% higher performance compared to M6a instances. Amazon EC2 Hpc7a instances, powered by 4th Gen AMD EPYC processors, deliver up to 2.5x better performance compared to Amazon EC2 Hpc6a instances. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.62.0](service/glue/CHANGELOG.md#v1620-2023-08-24) + * **Feature**: Added API attributes that help in the monitoring of sessions. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.41.0](service/mediaconvert/CHANGELOG.md#v1410-2023-08-24) + * **Feature**: This release includes additional audio channel tags in Quicktime outputs, support for film grain synthesis for AV1 outputs, ability to create audio-only FLAC outputs, and ability to specify Amazon S3 destination storage class. 
+* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.35.0](service/medialive/CHANGELOG.md#v1350-2023-08-24) + * **Feature**: MediaLive now supports passthrough of KLV data to a HLS output group with a TS container. MediaLive now supports setting an attenuation mode for AC3 audio when the coding mode is 3/2 LFE. MediaLive now supports specifying whether to include filler NAL units in RTMP output group settings. +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.27.0](service/mediatailor/CHANGELOG.md#v1270-2023-08-24) + * **Feature**: Adds new source location AUTODETECT_SIGV4 access type. +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.42.0](service/quicksight/CHANGELOG.md#v1420-2023-08-24) + * **Feature**: Excel support in Snapshot Export APIs. Removed Required trait for some insight Computations. Namespace-shared Folders support. Global Filters support. Table pin Column support. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.53.0](service/rds/CHANGELOG.md#v1530-2023-08-24) + * **Feature**: This release updates the supported versions for Percona XtraBackup in Aurora MySQL. +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.33.0](service/s3control/CHANGELOG.md#v1330-2023-08-24) + * **Feature**: Updates to endpoint ruleset tests to address Smithy validation issues and standardize the capitalization of DualStack. +* `github.com/aws/aws-sdk-go-v2/service/verifiedpermissions`: [v1.2.1](service/verifiedpermissions/CHANGELOG.md#v121-2023-08-24) + * **Documentation**: Documentation updates for Amazon Verified Permissions. + +# Release (2023-08-23) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.18.0](service/apigateway/CHANGELOG.md#v1180-2023-08-23) + * **Feature**: This release adds RootResourceId to GetRestApi response. +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.31.0](service/polly/CHANGELOG.md#v1310-2023-08-23) + * **Feature**: Amazon Polly adds 1 new voice - Zayd (ar-AE) + +# Release (2023-08-22) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.28.0](service/costexplorer/CHANGELOG.md#v1280-2023-08-22) + * **Feature**: This release adds the LastUpdatedDate and LastUsedDate timestamps to help you manage your cost allocation tags. +* `github.com/aws/aws-sdk-go-v2/service/globalaccelerator`: [v1.17.7](service/globalaccelerator/CHANGELOG.md#v1177-2023-08-22) + * **Documentation**: Global Accelerator now supports Client Ip Preservation for Network Load Balancer endpoints. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.52.0](service/rds/CHANGELOG.md#v1520-2023-08-22) + * **Feature**: Adding parameters to CreateCustomDbEngineVersion reserved for future use. +* `github.com/aws/aws-sdk-go-v2/service/verifiedpermissions`: [v1.2.0](service/verifiedpermissions/CHANGELOG.md#v120-2023-08-22) + * **Feature**: Documentation updates for Amazon Verified Permissions. Increases max results per page for ListPolicyStores, ListPolicies, and ListPolicyTemplates APIs from 20 to 50. 
+ # Release (2023-08-21) ## General Highlights diff --git a/vendor/github.com/aws/aws-sdk-go-v2/Makefile b/vendor/github.com/aws/aws-sdk-go-v2/Makefile index 9dc36fe4e..e089ad351 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/Makefile +++ b/vendor/github.com/aws/aws-sdk-go-v2/Makefile @@ -10,7 +10,7 @@ BUILD_TAGS=-tags "example,codegen,integration,ec2env,perftest" SMITHY_GO_SRC ?= $(shell pwd)/../smithy-go -SDK_MIN_GO_VERSION ?= 1.15 +SDK_MIN_GO_VERSION ?= 1.19 EACHMODULE_FAILFAST ?= true EACHMODULE_FAILFAST_FLAG=-fail-fast=${EACHMODULE_FAILFAST} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/README.md b/vendor/github.com/aws/aws-sdk-go-v2/README.md index 54626706f..9a6d0f4fd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/README.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/README.md @@ -4,7 +4,7 @@ `aws-sdk-go-v2` is the v2 AWS SDK for the Go programming language. -The v2 SDK requires a minimum version of `Go 1.15`. +The v2 SDK requires a minimum version of `Go 1.19`. Check out the [release notes](https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md) for information about the latest bug fixes, updates, and features added to the SDK. @@ -23,6 +23,15 @@ following in the AWS SDKs and Tools Shared Configuration and Credentials Referen * [AWS SDKs and Tools Maintenance Policy](https://docs.aws.amazon.com/credref/latest/refdocs/maint-policy.html) * [AWS SDKs and Tools Version Support Matrix](https://docs.aws.amazon.com/credref/latest/refdocs/version-support-matrix.html) +### Go version support policy + +The v2 SDK follows the upstream [release policy](https://go.dev/doc/devel/release#policy) +with an additional six months of support for the most recently deprecated +language version. + +**AWS reserves the right to drop support for unsupported Go versions earlier to +address critical security issues.** + ## Getting started To get started working with the SDK setup your project for Go modules, and retrieve the SDK dependencies with `go get`. This example shows how you can use the v2 SDK to make an API request using the SDK's [Amazon DynamoDB] client. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go index fe7aacbfa..b361c1386 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go @@ -146,6 +146,10 @@ type Config struct { // See https://docs.aws.amazon.com/sdkref/latest/guide/settings-reference.html for // more information on environment variables and shared config settings. AppID string + + // BaseEndpoint is an intermediary transfer location to a service specific + // BaseEndpoint on a service's Options. 
+ BaseEndpoint *string } // NewConfig returns a new Config pointer that can be chained with builder diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index 8cd672b6f..84b19df69 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.21.0" +const goModuleVersion = "1.22.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md index 44d08cee7..3a982d697 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md @@ -1,3 +1,11 @@ +# v1.5.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). + +# v1.4.14 (2023-10-06) + +* No change notes available for this release. + # v1.4.13 (2023-08-18) * No change notes available for this release. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go index 19f7e20cb..80f252c61 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go @@ -3,4 +3,4 @@ package eventstream // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.4.13" +const goModuleVersion = "1.5.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go index 00d7d3eee..987affdde 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go @@ -97,11 +97,21 @@ func (r RetryableConnectionError) IsErrorRetryable(err error) aws.Ternary { var netOpErr *net.OpError var dnsError *net.DNSError - switch { - case errors.As(err, &dnsError): + if errors.As(err, &dnsError) { // NXDOMAIN errors should not be retried - retryable = !dnsError.IsNotFound && dnsError.IsTemporary + if dnsError.IsNotFound { + return aws.BoolTernary(false) + } + // if !dnsError.Temporary(), error may or may not be temporary, + // (i.e. !Temporary() =/=> !retryable) so we should fall through to + // remaining checks + if dnsError.Temporary() { + return aws.BoolTernary(true) + } + } + + switch { case errors.As(err, &conErr) && conErr.ConnectionError(): retryable = true diff --git a/vendor/github.com/aws/aws-sdk-go-v2/ci-find-smithy-go.sh b/vendor/github.com/aws/aws-sdk-go-v2/ci-find-smithy-go.sh index 9b8f3a3d2..4da5d09cb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/ci-find-smithy-go.sh +++ b/vendor/github.com/aws/aws-sdk-go-v2/ci-find-smithy-go.sh @@ -18,7 +18,7 @@ if [ -z "$RUNNER_TMPDIR" ]; then exit 1 fi -branch=`git branch --show-current` +branch=$(git branch --show-current) if [ "$branch" == main ]; then echo aws-sdk-go-v2 is on branch main, stop exit 0 @@ -38,10 +38,10 @@ fi echo on branch \"$branch\" while [ -n "$branch" ] && [[ "$branch" == *-* ]]; do - echo looking for $branch... 
- git ls-remote --exit-code --heads $repository refs/heads/$branch + echo looking for "$branch"... + git ls-remote --exit-code --heads "$repository" refs/heads/"$branch" if [ "$?" == 0 ]; then - echo found $branch + echo found "$branch" matched_branch=$branch break fi @@ -54,5 +54,5 @@ if [ -z "$matched_branch" ]; then exit 0 fi -git clone -b $matched_branch $repository $RUNNER_TMPDIR/smithy-go +git clone -b "$matched_branch" "$repository" "$RUNNER_TMPDIR"/smithy-go SMITHY_GO_SRC=$RUNNER_TMPDIR/smithy-go make gen-mod-replace-smithy-. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md index 989b2642c..88593b55e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -1,3 +1,57 @@ +# v1.22.0 (2023-11-02) + +* **Feature**: Add env and shared config settings for disabling IMDSv1 fallback. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.21.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.20.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.19.1 (2023-10-24) + +* No change notes available for this release. + +# v1.19.0 (2023-10-16) + +* **Feature**: Modify logic of retrieving user agent appID from env config + +# v1.18.45 (2023-10-12) + +* **Bug Fix**: Fail to load config if an explicitly provided profile doesn't exist. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.44 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.43 (2023-10-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.42 (2023-09-22) + +* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0. +* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.41 (2023-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.40 (2023-09-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.39 (2023-09-05) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.18.38 (2023-08-31) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go index 138f8e76d..ef9f753d4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go @@ -2,18 +2,11 @@ package config import ( "context" + "os" "github.com/aws/aws-sdk-go-v2/aws" ) -// defaultLoaders are a slice of functions that will read external configuration -// sources for configuration values. 
These values are read by the AWSConfigResolvers -// using interfaces to extract specific information from the external configuration. -var defaultLoaders = []loader{ - loadEnvConfig, - loadSharedConfigIgnoreNotExist, -} - // defaultAWSConfigResolvers are a slice of functions that will resolve external // configuration values into AWS configuration values. // @@ -79,6 +72,8 @@ var defaultAWSConfigResolvers = []awsConfigResolver{ // Sets the sdk app ID if present in shared config profile resolveAppID, + + resolveBaseEndpoint, } // A Config represents a generic configuration value or set of values. This type @@ -190,7 +185,7 @@ func LoadDefaultConfig(ctx context.Context, optFns ...func(*LoadOptions) error) // assign Load Options to configs var cfgCpy = configs{options} - cfgCpy, err = cfgCpy.AppendFromLoaders(ctx, defaultLoaders) + cfgCpy, err = cfgCpy.AppendFromLoaders(ctx, resolveConfigLoaders(&options)) if err != nil { return aws.Config{}, err } @@ -202,3 +197,17 @@ func LoadDefaultConfig(ctx context.Context, optFns ...func(*LoadOptions) error) return cfg, nil } + +func resolveConfigLoaders(options *LoadOptions) []loader { + loaders := make([]loader, 2) + loaders[0] = loadEnvConfig + + // specification of a profile should cause a load failure if it doesn't exist + if os.Getenv(awsProfileEnvVar) != "" || options.SharedConfigProfile != "" { + loaders[1] = loadSharedConfig + } else { + loaders[1] = loadSharedConfigIgnoreNotExist + } + + return loaders +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go index 63ecd02b3..78bc14933 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go @@ -57,7 +57,8 @@ const ( awsEc2MetadataServiceEndpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT" - awsEc2MetadataDisabled = "AWS_EC2_METADATA_DISABLED" + awsEc2MetadataDisabled = "AWS_EC2_METADATA_DISABLED" + awsEc2MetadataV1DisabledEnvVar = "AWS_EC2_METADATA_V1_DISABLED" awsS3DisableMultiRegionAccessPointEnvVar = "AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS" @@ -69,6 +70,10 @@ const ( awsRetryMaxAttempts = "AWS_MAX_ATTEMPTS" awsRetryMode = "AWS_RETRY_MODE" + awsSdkAppID = "AWS_SDK_UA_APP_ID" + + awsIgnoreConfiguredEndpoints = "AWS_IGNORE_CONFIGURED_ENDPOINT_URLS" + awsEndpointURL = "AWS_ENDPOINT_URL" ) var ( @@ -205,6 +210,11 @@ type EnvConfig struct { // AWS_EC2_METADATA_DISABLED=true EC2IMDSClientEnableState imds.ClientEnableState + // Specifies if EC2 IMDSv1 fallback is disabled. + // + // AWS_EC2_METADATA_V1_DISABLED=true + EC2IMDSv1Disabled *bool + // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6) // // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6 @@ -248,6 +258,16 @@ type EnvConfig struct { // // aws_retry_mode=standard RetryMode aws.RetryMode + + // aws sdk app ID that can be added to user agent header string + AppID string + + // Flag used to disable configured endpoints. + IgnoreConfiguredEndpoints *bool + + // Value to contain configured endpoints to be propagated to + // corresponding endpoint resolution field. + BaseEndpoint string } // loadEnvConfig reads configuration values from the OS's environment variables. 
@@ -288,6 +308,8 @@ func NewEnvConfig() (EnvConfig, error) { cfg.RoleARN = os.Getenv(awsRoleARNEnvVar) cfg.RoleSessionName = os.Getenv(awsRoleSessionNameEnvVar) + cfg.AppID = os.Getenv(awsSdkAppID) + if err := setEndpointDiscoveryTypeFromEnvVal(&cfg.EnableEndpointDiscovery, []string{awsEnableEndpointDiscoveryEnvVar}); err != nil { return cfg, err } @@ -301,6 +323,9 @@ func NewEnvConfig() (EnvConfig, error) { return cfg, err } cfg.EC2IMDSEndpoint = os.Getenv(awsEc2MetadataServiceEndpointEnvVar) + if err := setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, []string{awsEc2MetadataV1DisabledEnvVar}); err != nil { + return cfg, err + } if err := setBoolPtrFromEnvVal(&cfg.S3DisableMultiRegionAccessPoints, []string{awsS3DisableMultiRegionAccessPointEnvVar}); err != nil { return cfg, err @@ -325,6 +350,12 @@ func NewEnvConfig() (EnvConfig, error) { return cfg, err } + setStringFromEnvVal(&cfg.BaseEndpoint, []string{awsEndpointURL}) + + if err := setBoolPtrFromEnvVal(&cfg.IgnoreConfiguredEndpoints, []string{awsIgnoreConfiguredEndpoints}); err != nil { + return cfg, err + } + return cfg, nil } @@ -335,6 +366,10 @@ func (c EnvConfig) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, return c.DefaultsMode, true, nil } +func (c EnvConfig) getAppID(context.Context) (string, bool, error) { + return c.AppID, len(c.AppID) > 0, nil +} + // GetRetryMaxAttempts returns the value of AWS_MAX_ATTEMPTS if was specified, // and not 0. func (c EnvConfig) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) { @@ -472,6 +507,34 @@ func (c EnvConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) { return bytes.NewReader(b), true, nil } +// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured +// endpoints feature. +func (c EnvConfig) GetIgnoreConfiguredEndpoints(context.Context) (bool, bool, error) { + if c.IgnoreConfiguredEndpoints == nil { + return false, false, nil + } + + return *c.IgnoreConfiguredEndpoints, true, nil +} + +func (c EnvConfig) getBaseEndpoint(context.Context) (string, bool, error) { + return c.BaseEndpoint, len(c.BaseEndpoint) > 0, nil +} + +// GetServiceBaseEndpoint is used to retrieve a normalized SDK ID for use +// with configured endpoints. +func (c EnvConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) { + if endpt := os.Getenv(fmt.Sprintf("%s_%s", awsEndpointURL, normalizeEnv(sdkID))); endpt != "" { + return endpt, true, nil + } + return "", false, nil +} + +func normalizeEnv(sdkID string) string { + upper := strings.ToUpper(sdkID) + return strings.ReplaceAll(upper, " ", "_") +} + // GetS3UseARNRegion returns whether to allow ARNs to direct the region // the S3 client's requests are sent to. func (c EnvConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) { @@ -663,3 +726,13 @@ func (c EnvConfig) GetEC2IMDSEndpoint() (string, bool, error) { return c.EC2IMDSEndpoint, true, nil } + +// GetEC2IMDSV1FallbackDisabled implements an EC2IMDSV1FallbackDisabled option +// resolver interface. 
+func (c EnvConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) { + if c.EC2IMDSv1Disabled == nil { + return false, false + } + + return *c.EC2IMDSv1Disabled, true +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go index 9195330a9..1450b496b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -3,4 +3,4 @@ package config // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.18.38" +const goModuleVersion = "1.22.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go index 69e54b77f..d52358460 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go @@ -122,6 +122,58 @@ func getRegion(ctx context.Context, configs configs) (value string, found bool, return } +// IgnoreConfiguredEndpointsProvider is needed to search for all providers +// that provide a flag to disable configured endpoints. +type IgnoreConfiguredEndpointsProvider interface { + GetIgnoreConfiguredEndpoints(ctx context.Context) (bool, bool, error) +} + +// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured +// endpoints feature. +func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(IgnoreConfiguredEndpointsProvider); ok { + value, found, err = p.GetIgnoreConfiguredEndpoints(ctx) + if err != nil || found { + break + } + } + } + return +} + +type baseEndpointProvider interface { + getBaseEndpoint(ctx context.Context) (string, bool, error) +} + +func getBaseEndpoint(ctx context.Context, configs configs) (value string, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(baseEndpointProvider); ok { + value, found, err = p.getBaseEndpoint(ctx) + if err != nil || found { + break + } + } + } + return +} + +type servicesObjectProvider interface { + getServicesObject(ctx context.Context) (map[string]map[string]string, bool, error) +} + +func getServicesObject(ctx context.Context, configs configs) (value map[string]map[string]string, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(servicesObjectProvider); ok { + value, found, err = p.getServicesObject(ctx) + if err != nil || found { + break + } + } + } + return +} + // appIDProvider provides access to the sdk app ID value type appIDProvider interface { getAppID(ctx context.Context) (string, bool, error) diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go index b03705350..b5a74b231 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go @@ -106,6 +106,29 @@ func resolveRegion(ctx context.Context, cfg *aws.Config, configs configs) error return nil } +func resolveBaseEndpoint(ctx context.Context, cfg *aws.Config, configs configs) error { + var downcastCfgSources []interface{} + for _, cs := range configs { + downcastCfgSources = append(downcastCfgSources, interface{}(cs)) + } + + if val, found, err := GetIgnoreConfiguredEndpoints(ctx, downcastCfgSources); found && val && err == nil { + cfg.BaseEndpoint = nil + return nil + } + + v, found, err := getBaseEndpoint(ctx, configs) + if err != nil { + return 
err + } + + if !found { + return nil + } + cfg.BaseEndpoint = aws.String(v) + return nil +} + // resolveAppID extracts the sdk app ID from the configs slice's SharedConfig or env var func resolveAppID(ctx context.Context, cfg *aws.Config, configs configs) error { ID, _, err := getAppID(ctx, configs) @@ -113,10 +136,6 @@ func resolveAppID(ctx context.Context, cfg *aws.Config, configs configs) error { return err } - // if app ID is set in env var, it should precedence shared config value - if appID := os.Getenv(`AWS_SDK_UA_APP_ID`); len(appID) > 0 { - ID = appID - } cfg.AppID = ID return nil } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go index e699194d3..20683bf5f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go @@ -28,6 +28,10 @@ const ( // the shared config file, not the credentials file. ssoSectionPrefix = `sso-session ` + // Prefix for services section. It is referenced in profile via the services + // parameter to configure clients for service-specific parameters. + servicesPrefix = `services` + // string equivalent for boolean endpointDiscoveryDisabled = `false` endpointDiscoveryEnabled = `true` @@ -75,6 +79,8 @@ const ( ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint" + ec2MetadataV1DisabledKey = "ec2_metadata_v1_disabled" + // Use DualStack Endpoint Resolution useDualStackEndpoint = "use_dualstack_endpoint" @@ -97,6 +103,10 @@ const ( caBundleKey = "ca_bundle" sdkAppID = "sdk_ua_app_id" + + ignoreConfiguredEndpoints = "ignore_configured_endpoint_urls" + + endpointURL = "endpoint_url" ) // defaultSharedConfigProfile allows for swapping the default profile for testing @@ -150,6 +160,24 @@ func (s *SSOSession) setFromIniSection(section ini.Section) { updateString(&s.SSOStartURL, section, ssoStartURLKey) } +// Services contains values configured in the services section +// of the AWS configuration file. +type Services struct { + // Services section values + // {"serviceId": {"key": "value"}} + // e.g. {"s3": {"endpoint_url": "example.com"}} + ServiceValues map[string]map[string]string +} + +func (s *Services) setFromIniSection(section ini.Section) { + if s.ServiceValues == nil { + s.ServiceValues = make(map[string]map[string]string) + } + for _, service := range section.List() { + s.ServiceValues[service] = section.Map(service) + } +} + // SharedConfig represents the configuration fields of the SDK config files. type SharedConfig struct { Profile string @@ -220,6 +248,12 @@ type SharedConfig struct { // ec2_metadata_service_endpoint=http://fd00:ec2::254 EC2IMDSEndpoint string + // Specifies that IMDS clients should not fallback to IMDSv1 if token + // requests fail. + // + // ec2_metadata_v1_disabled=true + EC2IMDSv1Disabled *bool + // Specifies if the S3 service should disable support for Multi-Region // access-points // @@ -272,6 +306,16 @@ type SharedConfig struct { // aws sdk app ID that can be added to user agent header string AppID string + + // Flag used to disable configured endpoints. + IgnoreConfiguredEndpoints *bool + + // Value to contain configured endpoints to be propagated to + // corresponding endpoint resolution field. + BaseEndpoint string + + // Value to contain services section content. 
+ Services Services } func (c SharedConfig) getDefaultsMode(ctx context.Context) (value aws.DefaultsMode, ok bool, err error) { @@ -361,6 +405,16 @@ func (c SharedConfig) GetEC2IMDSEndpoint() (string, bool, error) { return c.EC2IMDSEndpoint, true, nil } +// GetEC2IMDSV1FallbackDisabled implements an EC2IMDSV1FallbackDisabled option +// resolver interface. +func (c SharedConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) { + if c.EC2IMDSv1Disabled == nil { + return false, false + } + + return *c.EC2IMDSv1Disabled, true +} + // GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be // used for requests. func (c SharedConfig) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) { @@ -399,6 +453,40 @@ func (c SharedConfig) getAppID(context.Context) (string, bool, error) { return c.AppID, len(c.AppID) > 0, nil } +// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured +// endpoints feature. +func (c SharedConfig) GetIgnoreConfiguredEndpoints(context.Context) (bool, bool, error) { + if c.IgnoreConfiguredEndpoints == nil { + return false, false, nil + } + + return *c.IgnoreConfiguredEndpoints, true, nil +} + +func (c SharedConfig) getBaseEndpoint(context.Context) (string, bool, error) { + return c.BaseEndpoint, len(c.BaseEndpoint) > 0, nil +} + +// GetServiceBaseEndpoint is used to retrieve a normalized SDK ID for use +// with configured endpoints. +func (c SharedConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) { + if service, ok := c.Services.ServiceValues[normalizeShared(sdkID)]; ok { + if endpt, ok := service[endpointURL]; ok { + return endpt, true, nil + } + } + return "", false, nil +} + +func normalizeShared(sdkID string) string { + lower := strings.ToLower(sdkID) + return strings.ReplaceAll(lower, " ", "_") +} + +func (c SharedConfig) getServicesObject(context.Context) (map[string]map[string]string, bool, error) { + return c.Services.ServiceValues, c.Services.ServiceValues != nil, nil +} + // loadSharedConfigIgnoreNotExist is an alias for loadSharedConfig with the // addition of ignoring when none of the files exist or when the profile // is not found in any of the files. 
@@ -548,6 +636,7 @@ func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func cfg := SharedConfig{} profiles := map[string]struct{}{} + if err = cfg.setFromIniSections(profiles, profile, configSections, option.Logger); err != nil { return SharedConfig{}, err } @@ -576,6 +665,7 @@ func processConfigSections(ctx context.Context, sections *ini.Sections, logger l skipSections[newName] = struct{}{} case strings.HasPrefix(section, ssoSectionPrefix): + case strings.HasPrefix(section, servicesPrefix): case strings.EqualFold(section, "default"): default: // drop this section, as invalid profile name @@ -735,11 +825,14 @@ func mergeSections(dst *ini.Sections, src ini.Sections) error { s3DisableMultiRegionAccessPointsKey, ec2MetadataServiceEndpointModeKey, ec2MetadataServiceEndpointKey, + ec2MetadataV1DisabledKey, useDualStackEndpoint, useFIPSEndpointKey, defaultsModeKey, retryModeKey, caBundleKey, + roleDurationSecondsKey, + retryMaxAttemptsKey, ssoSessionNameKey, ssoAccountIDKey, @@ -753,16 +846,6 @@ func mergeSections(dst *ini.Sections, src ini.Sections) error { } } - intKeys := []string{ - roleDurationSecondsKey, - retryMaxAttemptsKey, - } - for i := range intKeys { - if err := mergeIntKey(&srcSection, &dstSection, sectionName, intKeys[i]); err != nil { - return err - } - } - // set srcSection on dst srcSection *dst = dst.SetSection(sectionName, dstSection) } @@ -789,26 +872,6 @@ func mergeStringKey(srcSection *ini.Section, dstSection *ini.Section, sectionNam return nil } -func mergeIntKey(srcSection *ini.Section, dstSection *ini.Section, sectionName, key string) error { - if srcSection.Has(key) { - srcValue := srcSection.Int(key) - v, err := ini.NewIntValue(srcValue) - if err != nil { - return fmt.Errorf("error merging %s, %w", key, err) - } - - if dstSection.Has(key) { - dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, key, - dstSection.SourceFile[key], srcSection.SourceFile[key])) - - } - - dstSection.UpdateValue(key, v) - dstSection.UpdateSourceFile(key, srcSection.SourceFile[key]) - } - return nil -} - func newMergeKeyLogMessage(sectionName, key, dstSourceFile, srcSourceFile string) string { return fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+ "with a %v value found in a duplicate profile defined at file %v. 
\n", @@ -912,6 +975,17 @@ func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile c.SSOSession = &ssoSession } + for _, sectionName := range sections.List() { + if strings.HasPrefix(sectionName, servicesPrefix) { + section, ok := sections.GetSection(sectionName) + if ok { + var svcs Services + svcs.setFromIniSection(section) + c.Services = svcs + } + } + } + return nil } @@ -962,9 +1036,16 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er updateString(&c.SSOAccountID, section, ssoAccountIDKey) updateString(&c.SSORoleName, section, ssoRoleNameKey) + // we're retaining a behavioral quirk with this field that existed before + // the removal of literal parsing for #2276: + // - if the key is missing, the config field will not be set + // - if the key is set to a non-numeric, the config field will be set to 0 if section.Has(roleDurationSecondsKey) { - d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second - c.RoleDurationSeconds = &d + if v, ok := section.Int(roleDurationSecondsKey); ok { + c.RoleDurationSeconds = aws.Duration(time.Duration(v) * time.Second) + } else { + c.RoleDurationSeconds = aws.Duration(time.Duration(0)) + } } updateString(&c.CredentialProcess, section, credentialProcessKey) @@ -978,6 +1059,7 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er return fmt.Errorf("failed to load %s from shared config, %v", ec2MetadataServiceEndpointModeKey, err) } updateString(&c.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey) + updateBoolPtr(&c.EC2IMDSv1Disabled, section, ec2MetadataV1DisabledKey) updateUseDualStackEndpoint(&c.UseDualStackEndpoint, section, useDualStackEndpoint) updateUseFIPSEndpoint(&c.UseFIPSEndpoint, section, useFIPSEndpointKey) @@ -998,6 +1080,10 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er // user agent app ID added to request User-Agent header updateString(&c.AppID, section, sdkAppID) + updateBoolPtr(&c.IgnoreConfiguredEndpoints, section, ignoreConfiguredEndpoints) + + updateString(&c.BaseEndpoint, section, endpointURL) + // Shared Credentials creds := aws.Credentials{ AccessKeyID: section.String(accessKeyIDKey), @@ -1314,12 +1400,13 @@ func updateInt(dst *int, section ini.Section, key string) error { if !section.Has(key) { return nil } - if vt, _ := section.ValueType(key); vt != ini.IntegerType { - return fmt.Errorf("invalid value %s=%s, expect integer", - key, section.String(key)) + v, ok := section.Int(key) + if !ok { + return fmt.Errorf("invalid value %s=%s, expect integer", key, section.String(key)) } - *dst = int(section.Int(key)) + + *dst = int(v) return nil } @@ -1329,7 +1416,10 @@ func updateBool(dst *bool, section ini.Section, key string) { if !section.Has(key) { return } - *dst = section.Bool(key) + + // retains pre-#2276 behavior where non-bool value would resolve to false + v, _ := section.Bool(key) + *dst = v } // updateBoolPtr will only update the dst with the value in the section key, @@ -1338,8 +1428,11 @@ func updateBoolPtr(dst **bool, section ini.Section, key string) { if !section.Has(key) { return } + + // retains pre-#2276 behavior where non-bool value would resolve to false + v, _ := section.Bool(key) *dst = new(bool) - **dst = section.Bool(key) + **dst = v } // updateEndpointDiscoveryType will only update the dst with the value in the section, if @@ -1371,7 +1464,8 @@ func updateUseDualStackEndpoint(dst *aws.DualStackEndpointState, section ini.Sec return } - if section.Bool(key) { + // retains 
pre-#2276 behavior where non-bool value would resolve to false + if v, _ := section.Bool(key); v { *dst = aws.DualStackEndpointStateEnabled } else { *dst = aws.DualStackEndpointStateDisabled @@ -1387,7 +1481,8 @@ func updateUseFIPSEndpoint(dst *aws.FIPSEndpointState, section ini.Section, key return } - if section.Bool(key) { + // retains pre-#2276 behavior where non-bool value would resolve to false + if v, _ := section.Bool(key); v { *dst = aws.FIPSEndpointStateEnabled } else { *dst = aws.FIPSEndpointStateDisabled diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md index 03d384fbc..6c3ca4670 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md @@ -1,3 +1,45 @@ +# v1.15.1 (2023-11-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.43 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.42 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.41 (2023-10-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.40 (2023-09-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.39 (2023-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.38 (2023-09-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.37 (2023-09-05) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.13.36 (2023-08-31) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go index 77255ad08..e6cba9dc3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go @@ -3,4 +3,4 @@ package credentials // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.13.36" +const goModuleVersion = "1.15.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md index 66b3e83c3..ba06fb199 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md @@ -1,3 +1,24 @@ +# v1.14.2 (2023-11-02) + +* No change notes available for this release. + +# v1.14.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.14.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.13 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.12 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.13.11 (2023-08-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go index e55edd992..46e144d93 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go @@ -119,6 +119,7 @@ func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { resolveClientEnableState(cfg, &opts) resolveEndpointConfig(cfg, &opts) resolveEndpointModeConfig(cfg, &opts) + resolveEnableFallback(cfg, &opts) return New(opts, optFns...) } @@ -328,3 +329,20 @@ func resolveEndpointConfig(cfg aws.Config, options *Options) error { options.Endpoint = value return nil } + +func resolveEnableFallback(cfg aws.Config, options *Options) { + if options.EnableFallback != aws.UnknownTernary { + return + } + + disabled, ok := internalconfig.ResolveV1FallbackDisabled(cfg.ConfigSources) + if !ok { + return + } + + if disabled { + options.EnableFallback = aws.FalseTernary + } else { + options.EnableFallback = aws.TrueTernary + } +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go index deb28a237..36432a4ff 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go @@ -3,4 +3,4 @@ package imds // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.13.11" +const goModuleVersion = "1.14.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go index d72fcb562..ce7745589 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go @@ -58,6 +58,10 @@ type EndpointResolver interface { GetEC2IMDSEndpoint() (string, bool, error) } +type v1FallbackDisabledResolver interface { + GetEC2IMDSV1FallbackDisabled() (bool, bool) +} + // ResolveClientEnableState resolves the ClientEnableState from a list of configuration sources. func ResolveClientEnableState(sources []interface{}) (value ClientEnableState, found bool, err error) { for _, source := range sources { @@ -96,3 +100,15 @@ func ResolveEndpointConfig(sources []interface{}) (value string, found bool, err } return value, found, err } + +// ResolveV1FallbackDisabled ... 
+func ResolveV1FallbackDisabled(sources []interface{}) (bool, bool) { + for _, source := range sources { + if resolver, ok := source.(v1FallbackDisabledResolver); ok { + if v, found := resolver.GetEC2IMDSV1FallbackDisabled(); found { + return v, true + } + } + } + return false, false +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md index 70e29d3b9..81390290f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md @@ -1,3 +1,57 @@ +# v1.13.1 (2023-11-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.13.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.12.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.92 (2023-10-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.91 (2023-10-16) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.90 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.89 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.88 (2023-10-02) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.87 (2023-09-26) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.86 (2023-09-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.85 (2023-09-20) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.84 (2023-09-18) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.11.83 (2023-09-05) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.11.82 (2023-08-31) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go index 671cedb27..8a7aaa2d0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go @@ -3,4 +3,4 @@ package manager // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.11.82" +const goModuleVersion = "1.13.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index 9826a85e6..e26f211e8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,20 @@ +# v1.2.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support 
policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.43 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.42 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.1.41 (2023-08-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go new file mode 100644 index 000000000..e7835f852 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go @@ -0,0 +1,57 @@ +package configsources + +import ( + "context" +) + +// ServiceBaseEndpointProvider is needed to search for all providers +// that provide a configured service endpoint +type ServiceBaseEndpointProvider interface { + GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) +} + +// IgnoreConfiguredEndpointsProvider is needed to search for all providers +// that provide a flag to disable configured endpoints. +// +// Currently duplicated from github.com/aws/aws-sdk-go-v2/config because +// service packages cannot import github.com/aws/aws-sdk-go-v2/config +// due to result import cycle error. +type IgnoreConfiguredEndpointsProvider interface { + GetIgnoreConfiguredEndpoints(ctx context.Context) (bool, bool, error) +} + +// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured +// endpoints feature. +// +// Currently duplicated from github.com/aws/aws-sdk-go-v2/config because +// service packages cannot import github.com/aws/aws-sdk-go-v2/config +// due to result import cycle error. 
+func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) { + for _, cfg := range configs { + if p, ok := cfg.(IgnoreConfiguredEndpointsProvider); ok { + value, found, err = p.GetIgnoreConfiguredEndpoints(ctx) + if err != nil || found { + break + } + } + } + return +} + +// ResolveServiceBaseEndpoint is used to retrieve service endpoints from configured sources +// while allowing for configured endpoints to be disabled +func ResolveServiceBaseEndpoint(ctx context.Context, sdkID string, configs []interface{}) (value string, found bool, err error) { + if val, found, _ := GetIgnoreConfiguredEndpoints(ctx, configs); found && val { + return "", false, nil + } + + for _, cs := range configs { + if p, ok := cs.(ServiceBaseEndpointProvider); ok { + value, found, err = p.GetServiceBaseEndpoint(context.Background(), sdkID) + if err != nil || found { + break + } + } + } + return +} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index c7eec1c13..7766ee918 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.1.41" +const goModuleVersion = "1.2.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json index 4e0c6f123..ab107ca55 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json @@ -4,6 +4,7 @@ "outputs" : { "dnsSuffix" : "amazonaws.com", "dualStackDnsSuffix" : "api.aws", + "implicitGlobalRegion" : "us-east-1", "name" : "aws", "supportsDualStack" : true, "supportsFIPS" : true @@ -103,6 +104,7 @@ "outputs" : { "dnsSuffix" : "amazonaws.com.cn", "dualStackDnsSuffix" : "api.amazonwebservices.com.cn", + "implicitGlobalRegion" : "cn-northwest-1", "name" : "aws-cn", "supportsDualStack" : true, "supportsFIPS" : true @@ -124,6 +126,7 @@ "outputs" : { "dnsSuffix" : "amazonaws.com", "dualStackDnsSuffix" : "api.aws", + "implicitGlobalRegion" : "us-gov-west-1", "name" : "aws-us-gov", "supportsDualStack" : true, "supportsFIPS" : true @@ -145,6 +148,7 @@ "outputs" : { "dnsSuffix" : "c2s.ic.gov", "dualStackDnsSuffix" : "c2s.ic.gov", + "implicitGlobalRegion" : "us-iso-east-1", "name" : "aws-iso", "supportsDualStack" : false, "supportsFIPS" : true @@ -166,6 +170,7 @@ "outputs" : { "dnsSuffix" : "sc2s.sgov.gov", "dualStackDnsSuffix" : "sc2s.sgov.gov", + "implicitGlobalRegion" : "us-isob-east-1", "name" : "aws-iso-b", "supportsDualStack" : false, "supportsFIPS" : true @@ -184,6 +189,7 @@ "outputs" : { "dnsSuffix" : "cloud.adc-e.uk", "dualStackDnsSuffix" : "cloud.adc-e.uk", + "implicitGlobalRegion" : "eu-isoe-west-1", "name" : "aws-iso-e", "supportsDualStack" : false, "supportsFIPS" : true @@ -195,6 +201,7 @@ "outputs" : { "dnsSuffix" : "csp.hci.ic.gov", "dualStackDnsSuffix" : "csp.hci.ic.gov", + "implicitGlobalRegion" : "us-isof-south-1", "name" : "aws-iso-f", "supportsDualStack" : false, "supportsFIPS" : true diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md 
b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index a470e22de..e19cd6630 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,20 @@ +# v2.5.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.5.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.37 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v2.4.36 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + # v2.4.35 (2023-08-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index ac28b313b..7128c248c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.4.35" +const goModuleVersion = "2.5.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md index 74eff6a52..558a781de 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md @@ -1,3 +1,26 @@ +# v1.5.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.4.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.45 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.44 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.3.43 (2023-09-22) + +* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0. +* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats. 
+ # v1.3.42 (2023-08-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go index 709294b9e..cd6cdb2d8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go @@ -3,4 +3,4 @@ package ini // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.42" +const goModuleVersion = "1.5.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go index abf1fb036..ab16760dd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go @@ -67,6 +67,8 @@ func (l *iniLexer) tokenize(b []byte) ([]Token, error) { for len(runes) > 0 && count < tokenAmount { switch { + case isSubProperty(runes): + tokens[count], n, err = newLitToken(runes) case isWhitespace(runes[0]): tokens[count], n, err = newWSToken(runes) case isComma(runes[0]): @@ -101,6 +103,8 @@ func countTokens(runes []rune) int { for len(runes) > 0 { switch { + case isSubProperty(runes): + _, n, err = newLitToken(runes) case isWhitespace(runes[0]): _, n, err = newWSToken(runes) case isComma(runes[0]): diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go index eca42d1b2..a24903202 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go @@ -12,34 +12,6 @@ var ( runesFalse = []rune("false") ) -var literalValues = [][]rune{ - runesTrue, - runesFalse, -} - -func isBoolValue(b []rune) bool { - for _, lv := range literalValues { - if isCaselessLitValue(lv, b) { - return true - } - } - return false -} - -func isLitValue(want, have []rune) bool { - if len(have) < len(want) { - return false - } - - for i := 0; i < len(want); i++ { - if want[i] != have[i] { - return false - } - } - - return true -} - // isCaselessLitValue is a caseless value comparison, assumes want is already lower-cased for efficiency. func isCaselessLitValue(want, have []rune) bool { if len(have) < len(want) { @@ -55,68 +27,6 @@ func isCaselessLitValue(want, have []rune) bool { return true } -// isNumberValue will return whether not the leading characters in -// a byte slice is a number. A number is delimited by whitespace or -// the newline token. -// -// A number is defined to be in a binary, octal, decimal (int | float), hex format, -// or in scientific notation. 
-func isNumberValue(b []rune) bool { - negativeIndex := 0 - helper := numberHelper{} - needDigit := false - - for i := 0; i < len(b); i++ { - negativeIndex++ - - switch b[i] { - case '-': - if helper.IsNegative() || negativeIndex != 1 { - return false - } - helper.Determine(b[i]) - needDigit = true - continue - case 'e', 'E': - if err := helper.Determine(b[i]); err != nil { - return false - } - negativeIndex = 0 - needDigit = true - continue - case 'b': - if helper.numberFormat == hex { - break - } - fallthrough - case 'o', 'x': - needDigit = true - if i == 0 { - return false - } - - fallthrough - case '.': - if err := helper.Determine(b[i]); err != nil { - return false - } - needDigit = true - continue - } - - if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) { - return !needDigit - } - - if !helper.CorrectByte(b[i]) { - return false - } - needDigit = false - } - - return !needDigit -} - func isValid(b []rune) (bool, int, error) { if len(b) == 0 { // TODO: should probably return an error @@ -138,14 +48,8 @@ func (v ValueType) String() string { switch v { case NoneType: return "NONE" - case DecimalType: - return "FLOAT" - case IntegerType: - return "INT" case StringType: return "STRING" - case BoolType: - return "BOOL" } return "" @@ -154,11 +58,8 @@ func (v ValueType) String() string { // ValueType enums const ( NoneType = ValueType(iota) - DecimalType - IntegerType StringType QuotedStringType - BoolType ) // Value is a union container @@ -166,10 +67,8 @@ type Value struct { Type ValueType raw []rune - integer int64 - decimal float64 - boolean bool - str string + str string + mp map[string]string } func newValue(t ValueType, base int, raw []rune) (Value, error) { @@ -177,36 +76,18 @@ func newValue(t ValueType, base int, raw []rune) (Value, error) { Type: t, raw: raw, } - var err error switch t { - case DecimalType: - v.decimal, err = strconv.ParseFloat(string(raw), 64) - case IntegerType: - if base != 10 { - raw = raw[2:] - } - - v.integer, err = strconv.ParseInt(string(raw), base, 64) case StringType: v.str = string(raw) + if isSubProperty(raw) { + v.mp = v.MapValue() + } case QuotedStringType: v.str = string(raw[1 : len(raw)-1]) - case BoolType: - v.boolean = isCaselessLitValue(runesTrue, v.raw) } - // issue 2253 - // - // if the value trying to be parsed is too large, then we will use - // the 'StringType' and raw value instead. - if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange { - v.Type = StringType - v.str = string(raw) - err = nil - } - - return v, err + return v, nil } // NewStringValue returns a Value type generated using a string input. @@ -214,24 +95,12 @@ func NewStringValue(str string) (Value, error) { return newValue(StringType, 10, []rune(str)) } -// NewIntValue returns a Value type generated using an int64 input. 
-func NewIntValue(i int64) (Value, error) { - v := strconv.FormatInt(i, 10) - return newValue(IntegerType, 10, []rune(v)) -} - func (v Value) String() string { switch v.Type { - case DecimalType: - return fmt.Sprintf("decimal: %f", v.decimal) - case IntegerType: - return fmt.Sprintf("integer: %d", v.integer) case StringType: return fmt.Sprintf("string: %s", string(v.raw)) case QuotedStringType: return fmt.Sprintf("quoted string: %s", string(v.raw)) - case BoolType: - return fmt.Sprintf("bool: %t", v.boolean) default: return "union not set" } @@ -247,26 +116,15 @@ func newLitToken(b []rune) (Token, int, error) { if err != nil { return token, n, err } - token = newToken(TokenLit, b[:n], QuotedStringType) - } else if isNumberValue(b) { - var base int - base, n, err = getNumericalValue(b) + } else if isSubProperty(b) { + offset := 0 + end, err := getSubProperty(b, offset) if err != nil { - return token, 0, err + return token, n, err } - - value := b[:n] - vType := IntegerType - if contains(value, '.') || hasExponent(value) { - vType = DecimalType - } - token = newToken(TokenLit, value, vType) - token.base = base - } else if isBoolValue(b) { - n, err = getBoolValue(b) - - token = newToken(TokenLit, b[:n], BoolType) + token = newToken(TokenLit, b[offset:end], StringType) + n = end } else { n, err = getValue(b) token = newToken(TokenLit, b[:n], StringType) @@ -275,19 +133,91 @@ func newLitToken(b []rune) (Token, int, error) { return token, n, err } +func isSubProperty(runes []rune) bool { + // needs at least + // (1) newline (2) whitespace (3) literal + if len(runes) < 3 { + return false + } + + // must have an equal expression + split := strings.FieldsFunc(string(runes), func(r rune) bool { + return isOp([]rune{r}) + }) + if len(split) < 2 { + return false + } + + // must start with a new line + if !isNewline(runes) { + return false + } + _, n, err := newNewlineToken(runes) + if err != nil { + return false + } + + // whitespace must follow newline + return isWhitespace(runes[n]) +} + +// getSubProperty pulls all subproperties and terminates when +// it hits a newline that is not the start of another subproperty. 
+// offset allows for removal of leading newline and whitespace +// characters +func getSubProperty(runes []rune, offset int) (int, error) { + for idx, val := range runes[offset:] { + if val == '\n' && !isSubProperty(runes[offset+idx:]) { + return offset + idx, nil + } + } + return 0, fmt.Errorf("no sub property") +} + +// MapValue returns a map value for sub properties +func (v Value) MapValue() map[string]string { + newlineParts := strings.Split(string(v.raw), "\n") + mp := make(map[string]string) + for _, part := range newlineParts { + operandParts := strings.Split(part, "=") + if len(operandParts) < 2 { + continue + } + key := strings.TrimSpace(operandParts[0]) + val := strings.TrimSpace(operandParts[1]) + mp[key] = val + } + return mp +} + // IntValue returns an integer value -func (v Value) IntValue() int64 { - return v.integer +func (v Value) IntValue() (int64, bool) { + i, err := strconv.ParseInt(string(v.raw), 0, 64) + if err != nil { + return 0, false + } + return i, true } // FloatValue returns a float value -func (v Value) FloatValue() float64 { - return v.decimal +func (v Value) FloatValue() (float64, bool) { + f, err := strconv.ParseFloat(string(v.raw), 64) + if err != nil { + return 0, false + } + return f, true } // BoolValue returns a bool value -func (v Value) BoolValue() bool { - return v.boolean +func (v Value) BoolValue() (bool, bool) { + // we don't use ParseBool as it recognizes more than what we've + // historically supported + if isCaselessLitValue(runesTrue, v.raw) { + return true, true + } else if isCaselessLitValue(runesFalse, v.raw) { + return false, true + } + return false, false } func isTrimmable(r rune) bool { @@ -301,6 +231,7 @@ func isTrimmable(r rune) bool { // StringValue returns the string value func (v Value) StringValue() string { switch v.Type { + case StringType: return strings.TrimFunc(string(v.raw), isTrimmable) case QuotedStringType: diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go deleted file mode 100644 index a45c0bc56..000000000 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go +++ /dev/null @@ -1,152 +0,0 @@ -package ini - -import ( - "bytes" - "fmt" - "strconv" -) - -const ( - none = numberFormat(iota) - binary - octal - decimal - hex - exponent -) - -type numberFormat int - -// numberHelper is used to dictate what format a number is in -// and what to do for negative values. Since -1e-4 is a valid -// number, we cannot just simply check for duplicate negatives. 
-type numberHelper struct { - numberFormat numberFormat - - negative bool - negativeExponent bool -} - -func (b numberHelper) Exists() bool { - return b.numberFormat != none -} - -func (b numberHelper) IsNegative() bool { - return b.negative || b.negativeExponent -} - -func (b *numberHelper) Determine(c rune) error { - if b.Exists() { - return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c))) - } - - switch c { - case 'b': - b.numberFormat = binary - case 'o': - b.numberFormat = octal - case 'x': - b.numberFormat = hex - case 'e', 'E': - b.numberFormat = exponent - case '-': - if b.numberFormat != exponent { - b.negative = true - } else { - b.negativeExponent = true - } - case '.': - b.numberFormat = decimal - default: - return NewParseError(fmt.Sprintf("invalid number character: %v", string(c))) - } - - return nil -} - -func (b numberHelper) CorrectByte(c rune) bool { - switch { - case b.numberFormat == binary: - if !isBinaryByte(c) { - return false - } - case b.numberFormat == octal: - if !isOctalByte(c) { - return false - } - case b.numberFormat == hex: - if !isHexByte(c) { - return false - } - case b.numberFormat == decimal: - if !isDigit(c) { - return false - } - case b.numberFormat == exponent: - if !isDigit(c) { - return false - } - case b.negativeExponent: - if !isDigit(c) { - return false - } - case b.negative: - if !isDigit(c) { - return false - } - default: - if !isDigit(c) { - return false - } - } - - return true -} - -func (b numberHelper) Base() int { - switch b.numberFormat { - case binary: - return 2 - case octal: - return 8 - case hex: - return 16 - default: - return 10 - } -} - -func (b numberHelper) String() string { - buf := bytes.Buffer{} - i := 0 - - switch b.numberFormat { - case binary: - i++ - buf.WriteString(strconv.Itoa(i) + ": binary format\n") - case octal: - i++ - buf.WriteString(strconv.Itoa(i) + ": octal format\n") - case hex: - i++ - buf.WriteString(strconv.Itoa(i) + ": hex format\n") - case exponent: - i++ - buf.WriteString(strconv.Itoa(i) + ": exponent format\n") - default: - i++ - buf.WriteString(strconv.Itoa(i) + ": integer format\n") - } - - if b.negative { - i++ - buf.WriteString(strconv.Itoa(i) + ": negative format\n") - } - - if b.negativeExponent { - i++ - buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n") - } - - return buf.String() -} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go index b5480fdeb..d38a9cd48 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go @@ -41,149 +41,6 @@ func getStringValue(b []rune) (int, error) { return i + 1, nil } -// getBoolValue will return a boolean and the amount -// of bytes read -// -// an error will be returned if the boolean is not of a correct -// value -func getBoolValue(b []rune) (int, error) { - if len(b) < 4 { - return 0, NewParseError("invalid boolean value") - } - - n := 0 - for _, lv := range literalValues { - if len(lv) > len(b) { - continue - } - - if isCaselessLitValue(lv, b) { - n = len(lv) - } - } - - if n == 0 { - return 0, NewParseError("invalid boolean value") - } - - return n, nil -} - -// getNumericalValue will return a numerical string, the amount -// of bytes read, and the base of the number -// -// an error will be returned if the number is not of a correct -// value -func getNumericalValue(b []rune) (int, int, error) { - if !isDigit(b[0]) { - return 0, 0, 
NewParseError("invalid digit value") - } - - i := 0 - helper := numberHelper{} - -loop: - for negativeIndex := 0; i < len(b); i++ { - negativeIndex++ - - if !isDigit(b[i]) { - switch b[i] { - case '-': - if helper.IsNegative() || negativeIndex != 1 { - return 0, 0, NewParseError("parse error '-'") - } - - n := getNegativeNumber(b[i:]) - i += (n - 1) - helper.Determine(b[i]) - continue - case '.': - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - case 'e', 'E': - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - - negativeIndex = 0 - case 'b': - if helper.numberFormat == hex { - break - } - fallthrough - case 'o', 'x': - if i == 0 && b[i] != '0' { - return 0, 0, NewParseError("incorrect base format, expected leading '0'") - } - - if i != 1 { - return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i)) - } - - if err := helper.Determine(b[i]); err != nil { - return 0, 0, err - } - default: - if isWhitespace(b[i]) { - break loop - } - - if isNewline(b[i:]) { - break loop - } - - if !(helper.numberFormat == hex && isHexByte(b[i])) { - if i+2 < len(b) && !isNewline(b[i:i+2]) { - return 0, 0, NewParseError("invalid numerical character") - } else if !isNewline([]rune{b[i]}) { - return 0, 0, NewParseError("invalid numerical character") - } - - break loop - } - } - } - } - - return helper.Base(), i, nil -} - -// isDigit will return whether or not something is an integer -func isDigit(b rune) bool { - return b >= '0' && b <= '9' -} - -func hasExponent(v []rune) bool { - return contains(v, 'e') || contains(v, 'E') -} - -func isBinaryByte(b rune) bool { - switch b { - case '0', '1': - return true - default: - return false - } -} - -func isOctalByte(b rune) bool { - switch b { - case '0', '1', '2', '3', '4', '5', '6', '7': - return true - default: - return false - } -} - -func isHexByte(b rune) bool { - if isDigit(b) { - return true - } - return (b >= 'A' && b <= 'F') || - (b >= 'a' && b <= 'f') -} - func getValue(b []rune) (int, error) { i := 0 @@ -211,24 +68,6 @@ func getValue(b []rune) (int, error) { return i, nil } -// getNegativeNumber will return a negative number from a -// byte slice. This will iterate through all characters until -// a non-digit has been found. -func getNegativeNumber(b []rune) int { - if b[0] != '-' { - return 0 - } - - i := 1 - for ; i < len(b); i++ { - if !isDigit(b[i]) { - return i - } - } - - return i -} - // isEscaped will return whether or not the character is an escaped // character. func isEscaped(value []rune, b rune) bool { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go index a07a63738..c124ad610 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go @@ -220,6 +220,20 @@ func NewSection(name string) Section { } } +// List will return a list of all +// services in values +func (t Section) List() []string { + keys := make([]string, len(t.values)) + i := 0 + for k := range t.values { + keys[i] = k + i++ + } + + sort.Strings(keys) + return keys +} + // UpdateSourceFile updates source file for a property to provided filepath. 
func (t Section) UpdateSourceFile(property string, filepath string) { t.SourceFile[property] = filepath @@ -245,17 +259,22 @@ func (t Section) ValueType(k string) (ValueType, bool) { } // Bool returns a bool value at k -func (t Section) Bool(k string) bool { +func (t Section) Bool(k string) (bool, bool) { return t.values[k].BoolValue() } // Int returns an integer value at k -func (t Section) Int(k string) int64 { +func (t Section) Int(k string) (int64, bool) { return t.values[k].IntValue() } +// Map returns a map value at k +func (t Section) Map(k string) map[string]string { + return t.values[k].MapValue() +} + // Float64 returns a float value at k -func (t Section) Float64(k string) float64 { +func (t Section) Float64(k string) (float64, bool) { return t.values[k].FloatValue() } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md index 80510904e..bd0c9511c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md @@ -1,3 +1,20 @@ +# v1.2.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.6 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.5 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.1.4 (2023-08-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go index 5a0e19ae6..ecaa3aa94 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go @@ -3,4 +3,4 @@ package v4a // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.1.4" +const goModuleVersion = "1.2.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/modman.toml b/vendor/github.com/aws/aws-sdk-go-v2/modman.toml index dad0dfc63..61c8da5c0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/modman.toml +++ b/vendor/github.com/aws/aws-sdk-go-v2/modman.toml @@ -1,7 +1,7 @@ [dependencies] "github.com/aws/aws-sdk-go" = "v1.44.28" - "github.com/aws/smithy-go" = "v1.14.2" + "github.com/aws/smithy-go" = "v1.16.0" "github.com/google/go-cmp" = "v0.5.8" "github.com/jmespath/go-jmespath" = "v0.4.0" "golang.org/x/net" = "v0.1.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md index 289c55c23..39326eada 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md @@ -1,3 +1,11 @@ +# v1.10.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). + +# v1.9.15 (2023-10-06) + +* No change notes available for this release. 
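The ini rework above replaces the typed number/bool tokens with string-backed values: scalar accessors such as IntValue, FloatValue and BoolValue now parse the raw text on demand and return an extra ok flag, and indented sub-property blocks are exposed through the new MapValue and Section.Map accessors. A minimal standalone sketch of the splitting MapValue performs (the SDK's own types are unexported, so this only mirrors the logic; the input text is illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// parseSubProperties mirrors the splitting done by the new Value.MapValue:
// each newline-separated "key = value" pair becomes one map entry.
func parseSubProperties(raw string) map[string]string {
	mp := make(map[string]string)
	for _, part := range strings.Split(raw, "\n") {
		operands := strings.Split(part, "=")
		if len(operands) < 2 {
			continue
		}
		mp[strings.TrimSpace(operands[0])] = strings.TrimSpace(operands[1])
	}
	return mp
}

func main() {
	// Shape of an indented sub-property block under a profile entry in a
	// shared config file (illustrative input only).
	raw := "\n  max_concurrent_requests = 20\n  use_accelerate_endpoint = true"
	fmt.Println(parseSubProperties(raw))
	// Output: map[max_concurrent_requests:20 use_accelerate_endpoint:true]
}
```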
+ # v1.9.14 (2023-08-18) * No change notes available for this release. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go index 187ef12b3..e57bbab9f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go @@ -3,4 +3,4 @@ package acceptencoding // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.9.14" +const goModuleVersion = "1.10.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md index cf2e59dd4..260701f19 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md @@ -1,3 +1,20 @@ +# v1.2.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.2.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.38 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.1.37 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.1.36 (2023-08-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go index 479bff9cb..c3acd0aeb 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go @@ -3,4 +3,4 @@ package checksum // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.1.36" +const goModuleVersion = "1.2.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index e410f3e48..b8338d32b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,20 @@ +# v1.10.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.10.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.37 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.9.36 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.9.35 (2023-08-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index 9c076b11f..d4d780e21 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.9.35" +const goModuleVersion = "1.10.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md index a7676224d..6ac972d6e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md @@ -1,3 +1,20 @@ +# v1.16.1 (2023-11-01) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.6 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.5 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.15.4 (2023-08-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go index 283fe42a0..4f9ef6325 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go @@ -3,4 +3,4 @@ package s3shared // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.15.4" +const goModuleVersion = "1.16.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md index df50ff01a..9f3eec745 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md @@ -1,3 +1,29 @@ +# v1.42.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.41.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). 
+* **Dependency Update**: Updated to the latest SDK module versions + +# v1.40.2 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.40.1 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.40.0 (2023-09-26) + +* **Feature**: This release adds a new field COMPLETED to the ReplicationStatus Enum. You can now use this field to validate the replication status of S3 objects using the AWS SDK. + +# v1.39.0 (2023-09-20) + +* **Feature**: Fix an issue where the SDK can fail to unmarshall response due to NumberFormatException + # v1.38.5 (2023-08-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go index 1ddcba2b2..e921189aa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go @@ -347,6 +347,7 @@ func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { resolveDisableMultiRegionAccessPoints(cfg, &opts) resolveUseDualStackEndpoint(cfg, &opts) resolveUseFIPSEndpoint(cfg, &opts) + resolveBaseEndpoint(cfg, &opts) return New(opts, optFns...) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go index 35e160493..5932f1f2e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_AbortMultipartUpload.go @@ -87,9 +87,11 @@ type AbortMultipartUploadInput struct { ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer types.RequestPayer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go index 72e267651..babfebcc6 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CompleteMultipartUpload.go @@ -156,9 +156,11 @@ type CompleteMultipartUploadInput struct { MultipartUpload *types.CompletedMultipartUpload // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. 
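The resolveBaseEndpoint call added to NewFromConfig above is the hook behind the v1.42.0 changelog entry about configured endpoints. A rough sketch of the caller-side option it complements, using the public BaseEndpoint override; the endpoint URL and bucket handling are placeholders, not part of this patch:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	// Loads region/credentials from the environment and shared config; per
	// the v1.42.0 changelog, endpoints configured via environment variables
	// (e.g. AWS_ENDPOINT_URL_S3) or the shared config file should also be
	// picked up by clients built from this config.
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}

	// Explicit per-client override; takes precedence over any endpoint
	// coming from the environment or shared config.
	client := s3.NewFromConfig(cfg, func(o *s3.Options) {
		o.BaseEndpoint = aws.String("https://s3.example.internal:9000") // placeholder
		o.UsePathStyle = true                                           // common for S3-compatible endpoints
	})

	out, err := client.ListBuckets(ctx, &s3.ListBucketsInput{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("buckets: %d", len(out.Buckets))
}
```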
If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer types.RequestPayer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go index fb34d74bc..97516a258 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CopyObject.go @@ -137,9 +137,12 @@ import ( // use the CopyObject action to change the storage class of an object that is // already stored in Amazon S3 by using the StorageClass parameter. For more // information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) -// in the Amazon S3 User Guide. If the source object's storage class is GLACIER, -// you must restore a copy of this object before you can use it as a source object -// for the copy operation. For more information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) +// in the Amazon S3 User Guide. If the source object's storage class is GLACIER or +// DEEP_ARCHIVE, or the object's storage class is INTELLIGENT_TIERING and it's S3 +// Intelligent-Tiering access tier (https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition) +// is Archive Access or Deep Archive Access, you must restore a copy of this object +// before you can use it as a source object for the copy operation. For more +// information, see RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html) // . For more information, see Copying Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html) // . Versioning By default, x-amz-copy-source header identifies the current // version of an object to copy. If the current version is a delete marker, Amazon @@ -332,9 +335,11 @@ type CopyObjectInput struct { ObjectLockRetainUntilDate *time.Time // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer types.RequestPayer @@ -359,11 +364,11 @@ type CopyObjectInput struct { // JSON with the encryption context key-value pairs. SSEKMSEncryptionContext *string - // Specifies the KMS key ID to use for object encryption. All GET and PUT requests - // for an object protected by KMS will fail if they're not made via SSL or using - // SigV4. 
For information about configuring any of the officially supported Amazon - // Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature - // Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // Specifies the KMS ID (Key ID, Key ARN, or Key Alias) to use for object + // encryption. All GET and PUT requests for an object protected by KMS will fail if + // they're not made via SSL or using SigV4. For information about configuring any + // of the officially supported Amazon Web Services SDKs and Amazon Web Services + // CLI, see Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) // in the Amazon S3 User Guide. SSEKMSKeyId *string @@ -371,11 +376,11 @@ type CopyObjectInput struct { // (for example, AES256 , aws:kms , aws:kms:dsse ). ServerSideEncryption types.ServerSideEncryption - // By default, Amazon S3 uses the STANDARD Storage Class to store newly created - // objects. The STANDARD storage class provides high durability and high - // availability. Depending on performance needs, you can specify a different - // Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For - // more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) + // If the x-amz-storage-class header is not used, the copied object will be stored + // in the STANDARD Storage Class by default. The STANDARD storage class provides + // high durability and high availability. Depending on performance needs, you can + // specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS + // Storage Class. For more information, see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) // in the Amazon S3 User Guide. StorageClass types.StorageClass diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go index 7f242b4e6..9da6f8b9c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateBucket.go @@ -26,11 +26,12 @@ import ( // information about bucket naming restrictions, see Bucket naming rules (https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html) // . If you want to create an Amazon S3 on Outposts bucket, see Create Bucket (https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html) // . By default, the bucket is created in the US East (N. Virginia) Region. You can -// optionally specify a Region in the request body. You might choose a Region to -// optimize latency, minimize costs, or address regulatory requirements. For -// example, if you reside in Europe, you will probably find it advantageous to -// create buckets in the Europe (Ireland) Region. For more information, see -// Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) +// optionally specify a Region in the request body. To constrain the bucket +// creation to a specific Region, you can use LocationConstraint (https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucketConfiguration.html) +// condition key. You might choose a Region to optimize latency, minimize costs, or +// address regulatory requirements. 
For example, if you reside in Europe, you will +// probably find it advantageous to create buckets in the Europe (Ireland) Region. +// For more information, see Accessing a bucket (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro) // . If you send your create bucket request to the s3.amazonaws.com endpoint, the // request goes to the us-east-1 Region. Accordingly, the signature calculations // in Signature Version 4 must use us-east-1 as the Region, even if the location diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go index 740ac066a..2831c54c7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_CreateMultipartUpload.go @@ -290,9 +290,11 @@ type CreateMultipartUploadInput struct { ObjectLockRetainUntilDate *time.Time // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer types.RequestPayer @@ -317,11 +319,12 @@ type CreateMultipartUploadInput struct { // JSON with the encryption context key-value pairs. SSEKMSEncryptionContext *string - // Specifies the ID of the symmetric encryption customer managed key to use for - // object encryption. All GET and PUT requests for an object protected by KMS will - // fail if they're not made via SSL or using SigV4. For information about - // configuring any of the officially supported Amazon Web Services SDKs and Amazon - // Web Services CLI, see Specifying the Signature Version in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) + // Specifies the ID (Key ID, Key ARN, or Key Alias) of the symmetric encryption + // customer managed key to use for object encryption. All GET and PUT requests for + // an object protected by KMS will fail if they're not made via SSL or using SigV4. + // For information about configuring any of the officially supported Amazon Web + // Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version + // in Request Authentication (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version) // in the Amazon S3 User Guide. 
SSEKMSKeyId *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go index 10d06dcca..5b5cbf187 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObject.go @@ -95,9 +95,11 @@ type DeleteObjectInput struct { MFA *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer types.RequestPayer @@ -109,8 +111,10 @@ type DeleteObjectInput struct { type DeleteObjectOutput struct { - // Specifies whether the versioned object that was permanently deleted was (true) - // or was not (false) a delete marker. + // Indicates whether the specified object version that was permanently deleted was + // (true) or was not (false) a delete marker before deletion. In a simple DELETE, + // this header indicates whether (true) or not (false) the current version of the + // object is a delete marker. DeleteMarker bool // If present, indicates that the requester was successfully charged for the diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go index befd40d42..b494c18ae 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_DeleteObjects.go @@ -119,9 +119,11 @@ type DeleteObjectsInput struct { MFA *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. 
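The reworded SSEKMSKeyId documentation in the CopyObject and CreateMultipartUpload hunks above now spells out that a key ID, key ARN, or key alias is accepted. A short sketch of passing an alias on an upload (PutObject carries the same field); bucket, object key, and alias names are placeholders:

```go
package main

import (
	"context"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// SSEKMSKeyId may be a key ID, key ARN, or key alias; an alias is used
	// here purely for illustration.
	_, err = client.PutObject(ctx, &s3.PutObjectInput{
		Bucket:               aws.String("example-bucket"),
		Key:                  aws.String("reports/2023-11.txt"),
		Body:                 strings.NewReader("hello"),
		ServerSideEncryption: types.ServerSideEncryptionAwsKms,
		SSEKMSKeyId:          aws.String("alias/example-key"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```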
RequestPayer types.RequestPayer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go index c5d666ae2..f11b25aa3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetBucketAccelerateConfiguration.go @@ -65,9 +65,11 @@ type GetBucketAccelerateConfigurationInput struct { ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer types.RequestPayer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go index c598abb6e..e84be49a9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObject.go @@ -191,9 +191,11 @@ type GetObjectInput struct { Range *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer types.RequestPayer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go index e7cd7b6be..b6ea63ea0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAcl.go @@ -75,9 +75,11 @@ type GetObjectAclInput struct { ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. 
If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer types.RequestPayer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go index bc290eeca..06b1ac2df 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectAttributes.go @@ -139,9 +139,11 @@ type GetObjectAttributesInput struct { PartNumberMarker *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer types.RequestPayer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go index a8c513207..c43441697 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectLegalHold.go @@ -63,9 +63,11 @@ type GetObjectLegalHoldInput struct { ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. 
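The requester-pays wording is updated the same way across the read-path operations above. For completeness, a brief sketch of how a caller opts in to those charges when downloading from a Requester Pays bucket; the bucket and key are placeholders:

```go
package main

import (
	"context"
	"io"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

func main() {
	ctx := context.Background()
	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatal(err)
	}
	client := s3.NewFromConfig(cfg)

	// RequestPayer confirms the caller accepts the request and data-transfer
	// charges on a Requester Pays bucket (placeholder names).
	out, err := client.GetObject(ctx, &s3.GetObjectInput{
		Bucket:       aws.String("requester-pays-example-bucket"),
		Key:          aws.String("data/object.bin"),
		RequestPayer: types.RequestPayerRequester,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer out.Body.Close()

	n, err := io.Copy(io.Discard, out.Body)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("downloaded %d bytes", n)
}
```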
RequestPayer types.RequestPayer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go index 17f5d2279..278d13522 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectRetention.go @@ -63,9 +63,11 @@ type GetObjectRetentionInput struct { ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer types.RequestPayer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go index bad883bd9..14ebf1c4e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTagging.go @@ -78,9 +78,11 @@ type GetObjectTaggingInput struct { ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer types.RequestPayer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go index c8bcccfce..dd06c1bde 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_GetObjectTorrent.go @@ -59,9 +59,11 @@ type GetObjectTorrentInput struct { ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. 
If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer types.RequestPayer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go index 96e204c19..a121acb16 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_HeadObject.go @@ -155,9 +155,11 @@ type HeadObjectInput struct { Range *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer types.RequestPayer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go index 721bbca18..4749ad10b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListMultipartUploads.go @@ -119,9 +119,11 @@ type ListMultipartUploadsInput struct { Prefix *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. 
RequestPayer types.RequestPayer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go index d4a6bf134..00ef2e9ee 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListObjectVersions.go @@ -95,9 +95,11 @@ type ListObjectVersionsInput struct { Prefix *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer types.RequestPayer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go index 2db5f0ac8..14ef6f91f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_ListParts.go @@ -100,9 +100,11 @@ type ListPartsInput struct { PartNumberMarker *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer types.RequestPayer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go index 696fbe2d6..ef1091961 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketEncryption.go @@ -24,16 +24,14 @@ import ( // default encryption configuration that uses server-side encryption with Amazon S3 // managed keys (SSE-S3). You can optionally configure default encryption for a // bucket by using server-side encryption with Key Management Service (KMS) keys -// (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS keys -// (DSSE-KMS), or server-side encryption with customer-provided keys (SSE-C). If -// you specify default encryption by using SSE-KMS, you can also configure Amazon -// S3 Bucket Keys. 
For information about bucket default encryption, see Amazon S3 -// bucket default encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) -// in the Amazon S3 User Guide. For more information about S3 Bucket Keys, see -// Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) -// in the Amazon S3 User Guide. This action requires Amazon Web Services Signature -// Version 4. For more information, see Authenticating Requests (Amazon Web -// Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) +// (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys +// (DSSE-KMS). If you specify default encryption by using SSE-KMS, you can also +// configure Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) +// . If you use PutBucketEncryption to set your default bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does +// not validate the KMS key ID provided in PutBucketEncryption requests. This +// action requires Amazon Web Services Signature Version 4. For more information, +// see Authenticating Requests (Amazon Web Services Signature Version 4) (https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html) // . To use this operation, you must have permission to perform the // s3:PutEncryptionConfiguration action. The bucket owner has this permission by // default. The bucket owner can grant this permission to others. For more diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go index bfb5250db..d44e96bc8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketReplication.go @@ -25,17 +25,19 @@ import ( // request body. In the replication configuration, you provide the name of the // destination bucket or buckets where you want Amazon S3 to replicate objects, the // IAM role that Amazon S3 can assume to replicate objects on your behalf, and -// other relevant information. A replication configuration must include at least -// one rule, and can contain a maximum of 1,000. Each rule identifies a subset of -// objects to replicate by filtering the objects in the source bucket. To choose -// additional subsets of objects to replicate, add a rule for each subset. To -// specify a subset of the objects in the source bucket to apply a replication rule -// to, add the Filter element as a child of the Rule element. You can filter -// objects based on an object key prefix, one or more object tags, or both. When -// you add the Filter element in the configuration, you must also add the following -// elements: DeleteMarkerReplication , Status , and Priority . If you are using an -// earlier version of the replication configuration, Amazon S3 handles replication -// of delete markers differently. For more information, see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations) +// other relevant information. 
You can invoke this request for a specific Amazon +// Web Services Region by using the aws:RequestedRegion (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-requestedregion) +// condition key. A replication configuration must include at least one rule, and +// can contain a maximum of 1,000. Each rule identifies a subset of objects to +// replicate by filtering the objects in the source bucket. To choose additional +// subsets of objects to replicate, add a rule for each subset. To specify a subset +// of the objects in the source bucket to apply a replication rule to, add the +// Filter element as a child of the Rule element. You can filter objects based on +// an object key prefix, one or more object tags, or both. When you add the Filter +// element in the configuration, you must also add the following elements: +// DeleteMarkerReplication , Status , and Priority . If you are using an earlier +// version of the replication configuration, Amazon S3 handles replication of +// delete markers differently. For more information, see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations) // . For information about enabling versioning on a bucket, see Using Versioning (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html) // . Handling Replication of Encrypted Objects By default, Amazon S3 doesn't // replicate objects that are stored at rest using server-side encryption with KMS diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go index 0c2bd3896..9792ad38e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketTagging.go @@ -27,7 +27,7 @@ import ( // specific application name, and then organize your billing information to see the // total cost of that application across several services. For more information, // see Cost Allocation and Tagging (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) -// and Using Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html) +// and Using Cost Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html) // . When this operation sets the tags for a bucket, it will overwrite any current // tags the bucket already has. You cannot use this operation to add tags to an // existing list of tags. To use this operation, you must have permissions to @@ -35,20 +35,17 @@ import ( // default and can grant this permission to others. For more information about // permissions, see Permissions Related to Bucket Subresource Operations (https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources) // and Managing Access Permissions to Your Amazon S3 Resources (https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html) -// . PutBucketTagging has the following special errors: -// - Error code: InvalidTagError -// - Description: The tag provided was not a valid tag. This error can occur if -// the tag did not pass input validation. 
For information about tag restrictions, -// see User-Defined Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) -// and Amazon Web Services-Generated Cost Allocation Tag Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html) +// . PutBucketTagging has the following special errors. For more Amazon S3 errors +// see, Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) +// . +// - InvalidTag - The tag provided was not a valid tag. This error can occur if +// the tag did not pass input validation. For more information, see Using Cost +// Allocation in Amazon S3 Bucket Tags (https://docs.aws.amazon.com/AmazonS3/latest/userguide/CostAllocTagging.html) // . -// - Error code: MalformedXMLError -// - Description: The XML provided does not match the schema. -// - Error code: OperationAbortedError -// - Description: A conflicting conditional action is currently in progress +// - MalformedXML - The XML provided does not match the schema. +// - OperationAborted - A conflicting conditional action is currently in progress // against this resource. Please try again. -// - Error code: InternalError -// - Description: The service was unable to apply the provided tag to the +// - InternalError - The service was unable to apply the provided tag to the // bucket. // // The following operations are related to PutBucketTagging : diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go index 060863535..e21b4c171 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutBucketWebsite.go @@ -61,7 +61,7 @@ import ( // Amazon S3 has a limitation of 50 routing rules per website configuration. If // you require more than 50 routing rules, you can use object redirect. For more // information, see Configuring an Object Redirect (https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html) -// in the Amazon S3 User Guide. +// in the Amazon S3 User Guide. The maximum request length is limited to 128 KB. func (c *Client) PutBucketWebsite(ctx context.Context, params *PutBucketWebsiteInput, optFns ...func(*Options)) (*PutBucketWebsiteOutput, error) { if params == nil { params = &PutBucketWebsiteInput{} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go index 2b61fcb8b..51c2f1fd4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObject.go @@ -264,9 +264,11 @@ type PutObjectInput struct { ObjectLockRetainUntilDate *time.Time // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. 
For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer types.RequestPayer @@ -294,9 +296,9 @@ type PutObjectInput struct { SSEKMSEncryptionContext *string // If x-amz-server-side-encryption has a valid value of aws:kms or aws:kms:dsse , - // this header specifies the ID of the Key Management Service (KMS) symmetric - // encryption customer managed key that was used for the object. If you specify - // x-amz-server-side-encryption:aws:kms or + // this header specifies the ID (Key ID, Key ARN, or Key Alias) of the Key + // Management Service (KMS) symmetric encryption customer managed key that was used + // for the object. If you specify x-amz-server-side-encryption:aws:kms or // x-amz-server-side-encryption:aws:kms:dsse , but do not provide // x-amz-server-side-encryption-aws-kms-key-id , Amazon S3 uses the Amazon Web // Services managed key ( aws/s3 ) to protect the data. If the KMS key does not diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go index 480f15cfb..22a0dcc22 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectAcl.go @@ -199,9 +199,11 @@ type PutObjectAclInput struct { GrantWriteACP *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer types.RequestPayer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go index 0245a045f..d4d371df7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLegalHold.go @@ -81,9 +81,11 @@ type PutObjectLegalHoldInput struct { LegalHold *types.ObjectLockLegalHold // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. 
For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer types.RequestPayer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go index b05c857ee..5b4a897e5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectLockConfiguration.go @@ -74,9 +74,11 @@ type PutObjectLockConfigurationInput struct { ObjectLockConfiguration *types.ObjectLockConfiguration // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer types.RequestPayer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go index 8715b20da..95176ea69 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectRetention.go @@ -84,9 +84,11 @@ type PutObjectRetentionInput struct { ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer types.RequestPayer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go index d8d773e21..e9785b934 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutObjectTagging.go @@ -20,10 +20,10 @@ import ( ) // Sets the supplied tag-set to an object that already exists in a bucket. A tag -// is a key-value pair. 
You can associate tags with an object by sending a PUT -// request against the tagging subresource that is associated with the object. You -// can retrieve tags by sending a GET request. For more information, see -// GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) +// is a key-value pair. For more information, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html) +// . You can associate tags with an object by sending a PUT request against the +// tagging subresource that is associated with the object. You can retrieve tags by +// sending a GET request. For more information, see GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) // . For tagging-related restrictions related to characters and encodings, see Tag // Restrictions (https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html) // . Note that Amazon S3 limits the maximum number of tags to 10 tags per object. @@ -31,20 +31,18 @@ import ( // s3:PutObjectTagging action. By default, the bucket owner has this permission and // can grant this permission to others. To put tags of any other version, use the // versionId query parameter. You also need permission for the -// s3:PutObjectVersionTagging action. For information about the Amazon S3 object -// tagging feature, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html) -// . PutObjectTagging has the following special errors: -// - Code: InvalidTagError -// - Cause: The tag provided was not a valid tag. This error can occur if the -// tag did not pass input validation. For more information, see Object Tagging (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html) +// s3:PutObjectVersionTagging action. PutObjectTagging has the following special +// errors. For more Amazon S3 errors see, Error Responses (https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) +// . +// - InvalidTag - The tag provided was not a valid tag. This error can occur if +// the tag did not pass input validation. For more information, see Object +// Tagging (https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-tagging.html) // . -// - Code: MalformedXMLError -// - Cause: The XML provided does not match the schema. -// - Code: OperationAbortedError -// - Cause: A conflicting conditional action is currently in progress against -// this resource. Please try again. -// - Code: InternalError -// - Cause: The service was unable to apply the provided tag to the object. +// - MalformedXML - The XML provided does not match the schema. +// - OperationAborted - A conflicting conditional action is currently in progress +// against this resource. Please try again. +// - InternalError - The service was unable to apply the provided tag to the +// object. // // The following operations are related to PutObjectTagging : // - GetObjectTagging (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html) @@ -116,9 +114,11 @@ type PutObjectTaggingInput struct { ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. 
For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer types.RequestPayer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go index 02f57c74c..8aa349c74 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_PutPublicAccessBlock.go @@ -27,7 +27,7 @@ import ( // an object, it checks the PublicAccessBlock configuration for both the bucket // (or the bucket that contains the object) and the bucket owner's account. If the // PublicAccessBlock configurations are different between the bucket and the -// account, Amazon S3 uses the most restrictive combination of the bucket-level and +// account, S3 uses the most restrictive combination of the bucket-level and // account-level settings. For more information about when Amazon S3 considers a // bucket or an object public, see The Meaning of "Public" (https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status) // . The following operations are related to PutPublicAccessBlock : diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go index 1173c318f..0ccf96b3f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_RestoreObject.go @@ -224,9 +224,11 @@ type RestoreObjectInput struct { ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. 
RequestPayer types.RequestPayer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go index 461f48488..a3e1a7db4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPart.go @@ -202,9 +202,11 @@ type UploadPartInput struct { ExpectedBucketOwner *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer types.RequestPayer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go index 31a61ead9..ce9a179a7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_UploadPartCopy.go @@ -215,9 +215,11 @@ type UploadPartCopyInput struct { ExpectedSourceBucketOwner *string // Confirms that the requester knows that they will be charged for the request. - // Bucket owners need not specify this parameter in their requests. For information - // about downloading objects from Requester Pays buckets, see Downloading Objects - // in Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) + // Bucket owners need not specify this parameter in their requests. If either the + // source or destination Amazon S3 bucket has Requester Pays enabled, the requester + // will pay for corresponding charges to copy the object. For information about + // downloading objects from Requester Pays buckets, see Downloading Objects in + // Requester Pays Buckets (https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html) // in the Amazon S3 User Guide. RequestPayer types.RequestPayer diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go index e01ad934d..751f646dc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_op_WriteGetObjectResponse.go @@ -242,9 +242,9 @@ type WriteGetObjectResponseInput struct { // . SSECustomerKeyMD5 *string - // If present, specifies the ID of the Amazon Web Services Key Management Service - // (Amazon Web Services KMS) symmetric encryption customer managed key that was - // used for stored in Amazon S3 object. 
+ // If present, specifies the ID (Key ID, Key ARN, or Key Alias) of the Amazon Web + // Services Key Management Service (Amazon Web Services KMS) symmetric encryption + // customer managed key that was used for stored in Amazon S3 object. SSEKMSKeyId *string // The server-side encryption algorithm used when storing requested object in diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go index c6012bdb2..7d57e70dc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/endpoints.go @@ -8,6 +8,7 @@ import ( "fmt" "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" internalendpoints "github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints" smithy "github.com/aws/smithy-go" @@ -18,6 +19,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" "net/url" + "os" "strings" ) @@ -204,6 +206,24 @@ func resolveEndpointResolverV2(options *Options) { } } +func resolveBaseEndpoint(cfg aws.Config, o *Options) { + if cfg.BaseEndpoint != nil { + o.BaseEndpoint = cfg.BaseEndpoint + } + + _, g := os.LookupEnv("AWS_ENDPOINT_URL") + _, s := os.LookupEnv("AWS_ENDPOINT_URL_S3") + + if g && !s { + return + } + + value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "S3", cfg.ConfigSources) + if found && err == nil { + o.BaseEndpoint = &value + } +} + // Utility function to aid with translating pseudo-regions to classical regions // with the appropriate setting indicated by the pseudo-region func mapPseudoRegion(pr string) (region string, fips aws.FIPSEndpointState) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json index 414c8124f..b15270c4d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/generated.json @@ -111,6 +111,7 @@ "deserializers.go", "doc.go", "endpoints.go", + "endpoints_config_test.go", "endpoints_test.go", "eventstream.go", "generated.json", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go index 4cd8af5fa..c8f7012a0 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go @@ -3,4 +3,4 @@ package s3 // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.38.5" +const goModuleVersion = "1.42.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go index 08756b48b..613da169d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/enums.go @@ -1006,10 +1006,11 @@ type ReplicationStatus string // Enum values for ReplicationStatus const ( - ReplicationStatusComplete ReplicationStatus = "COMPLETE" - ReplicationStatusPending ReplicationStatus = "PENDING" - ReplicationStatusFailed ReplicationStatus = "FAILED" - ReplicationStatusReplica ReplicationStatus = "REPLICA" + ReplicationStatusComplete ReplicationStatus = "COMPLETE" + ReplicationStatusPending ReplicationStatus = "PENDING" 
+ ReplicationStatusFailed ReplicationStatus = "FAILED" + ReplicationStatusReplica ReplicationStatus = "REPLICA" + ReplicationStatusCompleted ReplicationStatus = "COMPLETED" ) // Values returns all known values for ReplicationStatus. Note that this can be @@ -1021,6 +1022,7 @@ func (ReplicationStatus) Values() []ReplicationStatus { "PENDING", "FAILED", "REPLICA", + "COMPLETED", } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go index 1b8a31174..3ddea0a24 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/types/types.go @@ -594,9 +594,10 @@ type Delete struct { // Information about the deleted object. type DeletedObject struct { - // Specifies whether the versioned object that was permanently deleted was (true) - // or was not (false) a delete marker. In a simple DELETE, this header indicates - // whether (true) or not (false) a delete marker was created. + // Indicates whether the specified object version that was permanently deleted was + // (true) or was not (false) a delete marker before deletion. In a simple DELETE, + // this header indicates whether (true) or not (false) the current version of the + // object is a delete marker. DeleteMarker bool // The version ID of the delete marker created as a result of the DELETE @@ -2997,17 +2998,17 @@ type ServerSideEncryptionByDefault struct { // Amazon Web Services Key Management Service (KMS) customer Amazon Web Services // KMS key ID to use for the default encryption. This parameter is allowed if and - // only if SSEAlgorithm is set to aws:kms . You can specify the key ID or the - // Amazon Resource Name (ARN) of the KMS key. If you use a key ID, you can run into - // a LogDestination undeliverable error when creating a VPC flow log. If you are - // using encryption with cross-account or Amazon Web Services service operations - // you must use a fully qualified KMS key ARN. For more information, see Using - // encryption for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy) - // . + // only if SSEAlgorithm is set to aws:kms . You can specify the key ID, key alias, + // or the Amazon Resource Name (ARN) of the KMS key. // - Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab // - Key ARN: // arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab - // Amazon S3 only supports symmetric encryption KMS keys. For more information, + // - Key Alias: alias/alias-name + // If you use a key ID, you can run into a LogDestination undeliverable error when + // creating a VPC flow log. If you are using encryption with cross-account or + // Amazon Web Services service operations you must use a fully qualified KMS key + // ARN. For more information, see Using encryption for cross-account operations (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy) + // . Amazon S3 only supports symmetric encryption KMS keys. For more information, // see Asymmetric keys in Amazon Web Services KMS (https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html) // in the Amazon Web Services Key Management Service Developer Guide. 
KMSMasterKeyID *string diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md index abe977b8b..04222cda2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -1,3 +1,34 @@ +# v1.17.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.16.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.2 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.1 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.0 (2023-10-02) + +* **Feature**: Fix FIPS Endpoints in aws-us-gov. + +# v1.14.1 (2023-09-22) + +* No change notes available for this release. + +# v1.14.0 (2023-09-18) + +* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field. + # v1.13.6 (2023-08-31) * No change notes available for this release. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go index da4e470a6..a25792273 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go @@ -295,6 +295,7 @@ func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { resolveAWSEndpointResolver(cfg, &opts) resolveUseDualStackEndpoint(cfg, &opts) resolveUseFIPSEndpoint(cfg, &opts) + resolveBaseEndpoint(cfg, &opts) return New(opts, optFns...) 
} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go index 9395e91e2..115387059 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go @@ -8,6 +8,7 @@ import ( "fmt" "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" internalendpoints "github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints" smithyendpoints "github.com/aws/smithy-go/endpoints" @@ -16,6 +17,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" "net/url" + "os" "strings" ) @@ -194,6 +196,24 @@ func resolveEndpointResolverV2(options *Options) { } } +func resolveBaseEndpoint(cfg aws.Config, o *Options) { + if cfg.BaseEndpoint != nil { + o.BaseEndpoint = cfg.BaseEndpoint + } + + _, g := os.LookupEnv("AWS_ENDPOINT_URL") + _, s := os.LookupEnv("AWS_ENDPOINT_URL_SSO") + + if g && !s { + return + } + + value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "SSO", cfg.ConfigSources) + if found && err == nil { + o.BaseEndpoint = &value + } +} + // Utility function to aid with translating pseudo-regions to classical regions // with the appropriate setting indicated by the pseudo-region func mapPseudoRegion(pr string) (region string, fips aws.FIPSEndpointState) { @@ -411,6 +431,25 @@ func (r *resolver) ResolveEndpoint( } if _UseFIPS == true { if true == _PartitionResult.SupportsFIPS { + if "aws-us-gov" == _PartitionResult.Name { + uriString := func() string { + var out strings.Builder + out.WriteString("https://portal.sso.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } uriString := func() string { var out strings.Builder out.WriteString("https://portal.sso-fips.") @@ -455,279 +494,6 @@ func (r *resolver) ResolveEndpoint( } return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") } - if _Region == "ap-east-1" { - uriString := "https://portal.sso.ap-east-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "ap-northeast-1" { - uriString := "https://portal.sso.ap-northeast-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "ap-northeast-2" { - uriString := "https://portal.sso.ap-northeast-2.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "ap-northeast-3" { - uriString := "https://portal.sso.ap-northeast-3.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", 
uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "ap-south-1" { - uriString := "https://portal.sso.ap-south-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "ap-southeast-1" { - uriString := "https://portal.sso.ap-southeast-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "ap-southeast-2" { - uriString := "https://portal.sso.ap-southeast-2.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "ca-central-1" { - uriString := "https://portal.sso.ca-central-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "eu-central-1" { - uriString := "https://portal.sso.eu-central-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "eu-north-1" { - uriString := "https://portal.sso.eu-north-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "eu-south-1" { - uriString := "https://portal.sso.eu-south-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "eu-west-1" { - uriString := "https://portal.sso.eu-west-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "eu-west-2" { - uriString := "https://portal.sso.eu-west-2.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "eu-west-3" { - uriString := "https://portal.sso.eu-west-3.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "me-south-1" { - uriString := "https://portal.sso.me-south-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "sa-east-1" { - uriString := 
"https://portal.sso.sa-east-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "us-east-1" { - uriString := "https://portal.sso.us-east-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "us-east-2" { - uriString := "https://portal.sso.us-east-2.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "us-west-2" { - uriString := "https://portal.sso.us-west-2.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "us-gov-east-1" { - uriString := "https://portal.sso.us-gov-east-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "us-gov-west-1" { - uriString := "https://portal.sso.us-gov-west-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } uriString := func() string { var out strings.Builder out.WriteString("https://portal.sso.") diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json index ab6af36e8..8e6184187 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json @@ -16,6 +16,7 @@ "deserializers.go", "doc.go", "endpoints.go", + "endpoints_config_test.go", "endpoints_test.go", "generated.json", "internal/endpoints/endpoints.go", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go index 8b835c904..d5611c5cd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -3,4 +3,4 @@ package sso // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.13.6" +const goModuleVersion = "1.17.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go index 18849bab6..f044afde4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go @@ -227,6 +227,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "eu-central-1", }, }, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{ + Hostname: "portal.sso.eu-central-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-2", + 
}, + }, endpoints.EndpointKey{ Region: "eu-north-1", }: endpoints.Endpoint{ @@ -359,6 +367,24 @@ var defaultPartitions = endpoints.Partitions{ }, RegionRegex: partitionRegexp.AwsCn, IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.cn-north-1.amazonaws.com.cn", + CredentialScope: endpoints.CredentialScope{ + Region: "cn-north-1", + }, + }, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.cn-northwest-1.amazonaws.com.cn", + CredentialScope: endpoints.CredentialScope{ + Region: "cn-northwest-1", + }, + }, + }, }, { ID: "aws-iso", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md index 608ac60ce..057f9a25e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md @@ -1,3 +1,38 @@ +# v1.19.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.18.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.3 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.2 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.17.1 (2023-09-22) + +* No change notes available for this release. + +# v1.17.0 (2023-09-20) + +* **Feature**: Update FIPS endpoints in aws-us-gov. + +# v1.16.0 (2023-09-18) + +* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field. + +# v1.15.6 (2023-09-05) + +* No change notes available for this release. + # v1.15.5 (2023-08-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go index 24a692276..6a56093d8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go @@ -295,6 +295,7 @@ func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { resolveAWSEndpointResolver(cfg, &opts) resolveUseDualStackEndpoint(cfg, &opts) resolveUseFIPSEndpoint(cfg, &opts) + resolveBaseEndpoint(cfg, &opts) return New(opts, optFns...) 
} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go index 282d078b5..50b490cbd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go @@ -8,6 +8,7 @@ import ( "fmt" "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" internalendpoints "github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints" smithyendpoints "github.com/aws/smithy-go/endpoints" @@ -16,6 +17,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" "net/url" + "os" "strings" ) @@ -194,6 +196,24 @@ func resolveEndpointResolverV2(options *Options) { } } +func resolveBaseEndpoint(cfg aws.Config, o *Options) { + if cfg.BaseEndpoint != nil { + o.BaseEndpoint = cfg.BaseEndpoint + } + + _, g := os.LookupEnv("AWS_ENDPOINT_URL") + _, s := os.LookupEnv("AWS_ENDPOINT_URL_SSO_OIDC") + + if g && !s { + return + } + + value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "SSO OIDC", cfg.ConfigSources) + if found && err == nil { + o.BaseEndpoint = &value + } +} + // Utility function to aid with translating pseudo-regions to classical regions // with the appropriate setting indicated by the pseudo-region func mapPseudoRegion(pr string) (region string, fips aws.FIPSEndpointState) { @@ -411,6 +431,25 @@ func (r *resolver) ResolveEndpoint( } if _UseFIPS == true { if true == _PartitionResult.SupportsFIPS { + if "aws-us-gov" == _PartitionResult.Name { + uriString := func() string { + var out strings.Builder + out.WriteString("https://oidc.") + out.WriteString(_Region) + out.WriteString(".amazonaws.com") + return out.String() + }() + + uri, err := url.Parse(uriString) + if err != nil { + return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) + } + + return smithyendpoints.Endpoint{ + URI: *uri, + Headers: http.Header{}, + }, nil + } uriString := func() string { var out strings.Builder out.WriteString("https://oidc-fips.") @@ -455,279 +494,6 @@ func (r *resolver) ResolveEndpoint( } return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack") } - if _Region == "ap-east-1" { - uriString := "https://oidc.ap-east-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "ap-northeast-1" { - uriString := "https://oidc.ap-northeast-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "ap-northeast-2" { - uriString := "https://oidc.ap-northeast-2.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "ap-northeast-3" { - uriString := "https://oidc.ap-northeast-3.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) 
- } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "ap-south-1" { - uriString := "https://oidc.ap-south-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "ap-southeast-1" { - uriString := "https://oidc.ap-southeast-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "ap-southeast-2" { - uriString := "https://oidc.ap-southeast-2.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "ca-central-1" { - uriString := "https://oidc.ca-central-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "eu-central-1" { - uriString := "https://oidc.eu-central-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "eu-north-1" { - uriString := "https://oidc.eu-north-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "eu-south-1" { - uriString := "https://oidc.eu-south-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "eu-west-1" { - uriString := "https://oidc.eu-west-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "eu-west-2" { - uriString := "https://oidc.eu-west-2.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "eu-west-3" { - uriString := "https://oidc.eu-west-3.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "me-south-1" { - uriString := "https://oidc.me-south-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "sa-east-1" { - uriString := "https://oidc.sa-east-1.amazonaws.com" - - uri, err := url.Parse(uriString) 
- if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "us-east-1" { - uriString := "https://oidc.us-east-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "us-east-2" { - uriString := "https://oidc.us-east-2.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "us-west-2" { - uriString := "https://oidc.us-west-2.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "us-gov-east-1" { - uriString := "https://oidc.us-gov-east-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } - if _Region == "us-gov-west-1" { - uriString := "https://oidc.us-gov-west-1.amazonaws.com" - - uri, err := url.Parse(uriString) - if err != nil { - return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString) - } - - return smithyendpoints.Endpoint{ - URI: *uri, - Headers: http.Header{}, - }, nil - } uriString := func() string { var out strings.Builder out.WriteString("https://oidc.") diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json index fe2d075ad..403fac7c5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json @@ -15,6 +15,7 @@ "deserializers.go", "doc.go", "endpoints.go", + "endpoints_config_test.go", "endpoints_test.go", "generated.json", "internal/endpoints/endpoints.go", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go index 435045414..856141700 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go @@ -3,4 +3,4 @@ package ssooidc // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.15.5" +const goModuleVersion = "1.19.0" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go index 8df344e93..c48da8b88 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go @@ -227,6 +227,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "eu-central-1", }, }, + endpoints.EndpointKey{ + Region: "eu-central-2", + }: endpoints.Endpoint{ + Hostname: "oidc.eu-central-2.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "eu-central-2", + }, + }, endpoints.EndpointKey{ Region: "eu-north-1", }: 
endpoints.Endpoint{ @@ -267,6 +275,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "eu-west-3", }, }, + endpoints.EndpointKey{ + Region: "il-central-1", + }: endpoints.Endpoint{ + Hostname: "oidc.il-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "il-central-1", + }, + }, endpoints.EndpointKey{ Region: "me-south-1", }: endpoints.Endpoint{ @@ -351,6 +367,24 @@ var defaultPartitions = endpoints.Partitions{ }, RegionRegex: partitionRegexp.AwsCn, IsRegionalized: true, + Endpoints: endpoints.Endpoints{ + endpoints.EndpointKey{ + Region: "cn-north-1", + }: endpoints.Endpoint{ + Hostname: "oidc.cn-north-1.amazonaws.com.cn", + CredentialScope: endpoints.CredentialScope{ + Region: "cn-north-1", + }, + }, + endpoints.EndpointKey{ + Region: "cn-northwest-1", + }: endpoints.Endpoint{ + Hostname: "oidc.cn-northwest-1.amazonaws.com.cn", + CredentialScope: endpoints.CredentialScope{ + Region: "cn-northwest-1", + }, + }, + }, }, { ID: "aws-iso", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md index a3f535af8..2e5ca8f87 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -1,3 +1,30 @@ +# v1.25.0 (2023-11-01) + +* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.24.0 (2023-10-31) + +* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/). +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.2 (2023-10-12) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.1 (2023-10-06) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.23.0 (2023-10-02) + +* **Feature**: STS API updates for assumeRole + +# v1.22.0 (2023-09-18) + +* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service. +* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field. + # v1.21.5 (2023-08-21) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go index 22ac69043..c29d8cad1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go @@ -298,6 +298,7 @@ func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client { resolveAWSEndpointResolver(cfg, &opts) resolveUseDualStackEndpoint(cfg, &opts) resolveUseFIPSEndpoint(cfg, &opts) + resolveBaseEndpoint(cfg, &opts) return New(opts, optFns...) 
} diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go index ef1caae8d..cb5d56fd9 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go @@ -8,6 +8,7 @@ import ( "fmt" "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" + internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources" "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn" internalendpoints "github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints" smithy "github.com/aws/smithy-go" @@ -17,6 +18,7 @@ import ( smithyhttp "github.com/aws/smithy-go/transport/http" "net/http" "net/url" + "os" "strings" ) @@ -195,6 +197,24 @@ func resolveEndpointResolverV2(options *Options) { } } +func resolveBaseEndpoint(cfg aws.Config, o *Options) { + if cfg.BaseEndpoint != nil { + o.BaseEndpoint = cfg.BaseEndpoint + } + + _, g := os.LookupEnv("AWS_ENDPOINT_URL") + _, s := os.LookupEnv("AWS_ENDPOINT_URL_STS") + + if g && !s { + return + } + + value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "STS", cfg.ConfigSources) + if found && err == nil { + o.BaseEndpoint = &value + } +} + // Utility function to aid with translating pseudo-regions to classical regions // with the appropriate setting indicated by the pseudo-region func mapPseudoRegion(pr string) (region string, fips aws.FIPSEndpointState) { diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json index 2ae7a9b23..e44e7d149 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json @@ -21,6 +21,7 @@ "deserializers.go", "doc.go", "endpoints.go", + "endpoints_config_test.go", "endpoints_test.go", "generated.json", "internal/endpoints/endpoints.go", diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go index f0a57a675..dac009e1c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -3,4 +3,4 @@ package sts // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.21.5" +const goModuleVersion = "1.25.0" diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index e92ab54e5..5d211f32c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -2572,21 +2572,81 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "appflow-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "appflow-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "appflow-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + 
}: endpoint{ + Hostname: "appflow-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "appflow-fips.us-west-2.amazonaws.com", + }, }, }, "application-autoscaling": service{ @@ -4408,6 +4468,137 @@ var awsPartition = partition{ }, }, }, + "bedrock": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "bedrock-ap-northeast-1", + }: endpoint{ + Hostname: "bedrock.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "bedrock-ap-southeast-1", + }: endpoint{ + Hostname: "bedrock.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "bedrock-eu-central-1", + }: endpoint{ + Hostname: "bedrock.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "bedrock-fips-us-east-1", + }: endpoint{ + Hostname: "bedrock-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "bedrock-fips-us-west-2", + }: endpoint{ + Hostname: "bedrock-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "bedrock-runtime-ap-northeast-1", + }: endpoint{ + Hostname: "bedrock-runtime.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-ap-southeast-1", + }: endpoint{ + Hostname: "bedrock-runtime.ap-southeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-southeast-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-eu-central-1", + }: endpoint{ + Hostname: "bedrock-runtime.eu-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-fips-us-east-1", + }: endpoint{ + Hostname: "bedrock-runtime-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-fips-us-west-2", + }: endpoint{ + Hostname: "bedrock-runtime-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "bedrock-runtime-us-east-1", + }: endpoint{ + Hostname: "bedrock-runtime.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "bedrock-runtime-us-west-2", + }: endpoint{ + Hostname: "bedrock-runtime.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: 
"us-west-2", + }, + }, + endpointKey{ + Region: "bedrock-us-east-1", + }: endpoint{ + Hostname: "bedrock.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "bedrock-us-west-2", + }: endpoint{ + Hostname: "bedrock.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "billingconductor": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -5711,6 +5902,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -7111,6 +7305,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, @@ -7206,6 +7403,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -7236,12 +7436,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -7254,6 +7460,9 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -7989,6 +8198,185 @@ var awsPartition = partition{ }, }, }, + "datazone": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "datazone.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "datazone.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "datazone.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "datazone.ap-northeast-2.api.aws", + }, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "datazone.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "datazone.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "datazone.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "datazone.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "datazone.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "datazone.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "datazone.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "datazone.ca-central-1.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"datazone-fips.ca-central-1.amazonaws.com", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "datazone.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "datazone.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "datazone.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "datazone.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "datazone.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "datazone.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "datazone.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "datazone.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "datazone.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "datazone.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "datazone.me-south-1.api.aws", + }, + endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "datazone.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "datazone.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.us-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "datazone.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.us-east-2.amazonaws.com", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "datazone.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "datazone.us-west-2.api.aws", + }, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datazone-fips.us-west-2.amazonaws.com", + }, + }, + }, "dax": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -8782,6 +9170,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -10940,6 +11331,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, @@ -11023,6 +11420,12 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.ca-central-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -11041,6 +11444,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-central-1", + }: endpoint{ + Hostname: "email-fips.ca-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-central-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -11050,6 +11462,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "email-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ 
+ Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "email-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-west-2", }: endpoint{ @@ -11080,9 +11510,21 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "email-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, @@ -11108,6 +11550,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -11385,63 +11830,183 @@ var awsPartition = partition{ endpointKey{ Region: "af-south-1", }: endpoint{}, + endpointKey{ + Region: "af-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.af-south-1.api.aws", + }, endpointKey{ Region: "ap-east-1", }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-east-1.api.aws", + }, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-northeast-1.api.aws", + }, endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-northeast-2.api.aws", + }, endpointKey{ Region: "ap-northeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-northeast-3.api.aws", + }, endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-south-1.api.aws", + }, endpointKey{ Region: "ap-south-2", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-south-2.api.aws", + }, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-1.api.aws", + }, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-2.api.aws", + }, endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-3.api.aws", + }, endpointKey{ Region: "ap-southeast-4", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ap-southeast-4.api.aws", + }, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ca-central-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-central-1.api.aws", + }, endpointKey{ Region: "eu-central-2", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + Variant: 
dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-central-2.api.aws", + }, endpointKey{ Region: "eu-north-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-north-1.api.aws", + }, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-south-1.api.aws", + }, endpointKey{ Region: "eu-south-2", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-south-2.api.aws", + }, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-west-1.api.aws", + }, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-west-2.api.aws", + }, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.eu-west-3.api.aws", + }, endpointKey{ Region: "fips", }: endpoint{ @@ -11454,18 +12019,48 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.il-central-1.api.aws", + }, endpointKey{ Region: "me-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.me-central-1.api.aws", + }, endpointKey{ Region: "me-south-1", }: endpoint{}, + endpointKey{ + Region: "me-south-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.me-south-1.api.aws", + }, endpointKey{ Region: "sa-east-1", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.sa-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-east-1.api.aws", + }, endpointKey{ Region: "us-east-1", Variant: fipsVariant, @@ -11484,6 +12079,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-east-2.api.aws", + }, endpointKey{ Region: "us-east-2", Variant: fipsVariant, @@ -11502,6 +12103,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-west-1.api.aws", + }, endpointKey{ Region: "us-west-1", Variant: fipsVariant, @@ -11520,6 +12127,12 @@ var awsPartition = partition{ endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-west-2.api.aws", + }, endpointKey{ Region: "us-west-2", Variant: fipsVariant, @@ -11736,12 +12349,27 @@ var awsPartition = partition{ }, "finspace": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, endpointKey{ Region: "us-east-1", 
}: endpoint{}, @@ -12843,16 +13471,6 @@ var awsPartition = partition{ }: endpoint{}, }, }, - "gamesparks": service{ - Endpoints: serviceEndpoints{ - endpointKey{ - Region: "ap-northeast-1", - }: endpoint{}, - endpointKey{ - Region: "us-east-1", - }: endpoint{}, - }, - }, "geo": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -13817,6 +14435,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -13832,6 +14453,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -14076,6 +14700,9 @@ var awsPartition = partition{ }, "inspector2": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-east-1", }: endpoint{}, @@ -14085,6 +14712,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -14094,12 +14724,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -14272,7 +14908,7 @@ var awsPartition = partition{ Region: "ca-central-1", Variant: fipsVariant, }: endpoint{ - Hostname: "internetmonitor-fips.ca-central-1.api.aws", + Hostname: "internetmonitor-fips.ca-central-1.amazonaws.com", }, endpointKey{ Region: "eu-central-1", @@ -14343,7 +14979,7 @@ var awsPartition = partition{ Region: "us-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "internetmonitor-fips.us-east-1.api.aws", + Hostname: "internetmonitor-fips.us-east-1.amazonaws.com", }, endpointKey{ Region: "us-east-2", @@ -14354,7 +14990,7 @@ var awsPartition = partition{ Region: "us-east-2", Variant: fipsVariant, }: endpoint{ - Hostname: "internetmonitor-fips.us-east-2.api.aws", + Hostname: "internetmonitor-fips.us-east-2.amazonaws.com", }, endpointKey{ Region: "us-west-1", @@ -14365,7 +15001,7 @@ var awsPartition = partition{ Region: "us-west-1", Variant: fipsVariant, }: endpoint{ - Hostname: "internetmonitor-fips.us-west-1.api.aws", + Hostname: "internetmonitor-fips.us-west-1.amazonaws.com", }, endpointKey{ Region: "us-west-2", @@ -14376,7 +15012,7 @@ var awsPartition = partition{ Region: "us-west-2", Variant: fipsVariant, }: endpoint{ - Hostname: "internetmonitor-fips.us-west-2.api.aws", + Hostname: "internetmonitor-fips.us-west-2.amazonaws.com", }, }, }, @@ -15095,12 +15731,45 @@ var awsPartition = partition{ }, "iottwinmaker": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "api-ap-northeast-1", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "api-ap-northeast-2", + }: endpoint{ + Hostname: 
"api.iottwinmaker.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "api-ap-south-1", + }: endpoint{ + Hostname: "api.iottwinmaker.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, endpointKey{ Region: "api-ap-southeast-1", }: endpoint{ @@ -15149,6 +15818,30 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + endpointKey{ + Region: "data-ap-northeast-1", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-northeast-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-1", + }, + }, + endpointKey{ + Region: "data-ap-northeast-2", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + endpointKey{ + Region: "data-ap-south-1", + }: endpoint{ + Hostname: "data.iottwinmaker.ap-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-south-1", + }, + }, endpointKey{ Region: "data-ap-southeast-1", }: endpoint{ @@ -15505,6 +16198,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -17141,6 +17837,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -17150,12 +17849,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -17210,6 +17915,9 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -17422,6 +18130,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -17431,12 +18142,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -17491,6 +18208,9 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -18099,6 +18819,13 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "managedblockchain-query": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + }, + }, "marketplacecommerceanalytics": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -18448,6 +19175,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -18500,6 +19230,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: 
"ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -18552,6 +19285,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -18629,12 +19365,33 @@ var awsPartition = partition{ }, "meetings-chime": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -19859,6 +20616,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -20153,6 +20913,14 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "oidc.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -20193,6 +20961,14 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "oidc.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, endpointKey{ Region: "me-south-1", }: endpoint{ @@ -20295,6 +21071,14 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "omics.il-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "il-central-1", + }, + }, endpointKey{ Region: "us-east-1", }: endpoint{ @@ -20976,6 +21760,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -20991,12 +21778,18 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, endpointKey{ Region: "eu-south-1", }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -21243,6 +22036,14 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "portal.sso.eu-central-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-central-2", + }, + }, endpointKey{ Region: "eu-north-1", }: endpoint{ @@ -22941,6 +23742,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "resource-explorer-2.ap-southeast-2.api.aws", }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "resource-explorer-2.ap-southeast-3.api.aws", + }, endpointKey{ Region: "ap-southeast-4", }: endpoint{ @@ -22986,6 +23792,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "resource-explorer-2.il-central-1.api.aws", }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "resource-explorer-2.me-south-1.api.aws", + }, endpointKey{ Region: 
"sa-east-1", }: endpoint{ @@ -25303,6 +26114,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, endpointKey{ Region: "ap-south-1", }: endpoint{}, @@ -25312,15 +26126,24 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, endpointKey{ Region: "eu-west-2", }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -27774,6 +28597,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -27789,6 +28615,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -28912,15 +29741,30 @@ var awsPartition = partition{ }, "tnb": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -29343,6 +30187,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -29588,6 +30435,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -31851,6 +32701,20 @@ var awscnPartition = partition{ }, }, }, + "api.pricing": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + CredentialScope: credentialScope{ + Service: "pricing", + }, + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "api.sagemaker": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -32268,6 +33132,31 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "datazone": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "datazone.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "datazone.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, "dax": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -32480,9 +33369,21 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.cn-north-1.api.amazonwebservices.com.cn", + }, endpointKey{ Region: 
"cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, "emr-containers": service{ @@ -32510,9 +33411,21 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.cn-north-1.api.amazonwebservices.com.cn", + }, endpointKey{ Region: "cn-northwest-1", }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.cn-northwest-1.api.amazonwebservices.com.cn", + }, }, }, "events": service{ @@ -32670,6 +33583,16 @@ var awscnPartition = partition{ }, }, }, + "identitystore": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "internetmonitor": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -32969,6 +33892,26 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "oidc": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "oidc.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "oidc.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "organizations": service{ PartitionEndpoint: "aws-cn-global", IsRegionalized: boxedFalse, @@ -33007,6 +33950,26 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "portal.sso": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "portal.sso.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "portal.sso.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "ram": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -33438,7 +34401,7 @@ var awscnPartition = partition{ }: endpoint{}, }, }, - "states": service{ + "sso": service{ Endpoints: serviceEndpoints{ endpointKey{ Region: "cn-north-1", @@ -33448,6 +34411,28 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "states": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-north-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "states.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "states.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, "storagegateway": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -34957,6 +35942,13 @@ var awsusgovPartition = partition{ }, }, }, + "codestar-connections": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + }, + }, "cognito-identity": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -35300,6 +36292,31 @@ var awsusgovPartition = partition{ }, }, }, + "datazone": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "datazone.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "datazone.us-gov-west-1.api.aws", + }, + }, + }, "directconnect": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -35902,6 +36919,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, @@ -35913,6 +36936,13 @@ var awsusgovPartition = partition{ }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-gov-west-1.api.aws", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, @@ -35968,6 +36998,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, @@ -35986,6 +37022,12 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, @@ -36222,6 +37264,28 @@ var awsusgovPartition = partition{ }, }, }, + "geo": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "geo-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "geo-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "glacier": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -36288,21 +37352,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "glue.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "glue-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "glue.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "glue-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "glue-fips.us-gov-west-1.api.aws", + }, }, }, "greengrass": service{ @@ -37137,21 +38225,45 @@ var awsusgovPartition = partition{ endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lakeformation.us-gov-east-1.api.aws", + }, 
endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ Hostname: "lakeformation-fips.us-gov-east-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-east-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lakeformation.us-gov-west-1.api.aws", + }, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com", }, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "lakeformation-fips.us-gov-west-1.api.aws", + }, }, }, "lambda": service{ @@ -37296,6 +38408,36 @@ var awsusgovPartition = partition{ }, }, }, + "m2": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{}, + }, + }, "managedblockchain": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -38093,6 +39235,46 @@ var awsusgovPartition = partition{ }, }, }, + "resiliencehub": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "resiliencehub-fips.us-gov-west-1.amazonaws.com", + }, + }, + }, "resource-explorer-2": service{ Defaults: endpointDefaults{ defaultKey{}: endpoint{ @@ -38173,6 +39355,16 @@ var awsusgovPartition = partition{ }: endpoint{}, }, }, + "rolesanywhere": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + }, + }, "route53": service{ PartitionEndpoint: "aws-us-gov-global", IsRegionalized: boxedFalse, @@ -38881,10 +40073,20 @@ var awsusgovPartition = partition{ Endpoints: serviceEndpoints{ endpointKey{ Region: "us-gov-east-1", - }: endpoint{}, + }: endpoint{ + Hostname: "simspaceweaver.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, endpointKey{ Region: "us-gov-west-1", - }: endpoint{}, + }: endpoint{ + Hostname: "simspaceweaver.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, }, }, "sms": service{ @@ -40242,14 +41444,45 @@ var awsisoPartition = partition{ }, "elasticmapreduce": service{ Endpoints: serviceEndpoints{ 
+ endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-east-1", }: endpoint{ Protocols: []string{"https"}, }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov", + Protocols: []string{"https"}, + }, endpointKey{ Region: "us-iso-west-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov", + }, }, }, "es": service{ @@ -40801,6 +42034,13 @@ var awsisobPartition = partition{ }, }, }, + "api.sagemaker": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "appconfig": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -41035,9 +42275,24 @@ var awsisobPartition = partition{ }, "elasticmapreduce": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-isob-east-1", }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov", + }, }, }, "es": service{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go index d6fa24776..93bb5de64 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -171,6 +171,12 @@ type envConfig struct { // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6 EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState + // Specifies that IMDS clients should not fallback to IMDSv1 if token + // requests fail. + // + // AWS_EC2_METADATA_V1_DISABLED=true + EC2IMDSv1Disabled *bool + // Specifies that SDK clients must resolve a dual-stack endpoint for // services. 
// @@ -251,6 +257,9 @@ var ( ec2IMDSEndpointModeEnvKey = []string{ "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE", } + ec2MetadataV1DisabledEnvKey = []string{ + "AWS_EC2_METADATA_V1_DISABLED", + } useCABundleKey = []string{ "AWS_CA_BUNDLE", } @@ -393,6 +402,7 @@ func envConfigLoad(enableSharedConfig bool) (envConfig, error) { if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, ec2IMDSEndpointModeEnvKey); err != nil { return envConfig{}, err } + setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, ec2MetadataV1DisabledEnvKey) if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, awsUseDualStackEndpoint); err != nil { return cfg, err @@ -414,6 +424,24 @@ func setFromEnvVal(dst *string, keys []string) { } } +func setBoolPtrFromEnvVal(dst **bool, keys []string) { + for _, k := range keys { + value := os.Getenv(k) + if len(value) == 0 { + continue + } + + switch { + case strings.EqualFold(value, "false"): + *dst = new(bool) + **dst = false + case strings.EqualFold(value, "true"): + *dst = new(bool) + **dst = true + } + } +} + func setEC2IMDSEndpointMode(mode *endpoints.EC2IMDSEndpointModeState, keys []string) error { for _, k := range keys { value := os.Getenv(k) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 8127c99a9..3c88dee52 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -779,6 +779,14 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint, endpointMode) } + cfg.EC2MetadataEnableFallback = userCfg.EC2MetadataEnableFallback + if cfg.EC2MetadataEnableFallback == nil && envCfg.EC2IMDSv1Disabled != nil { + cfg.EC2MetadataEnableFallback = aws.Bool(!*envCfg.EC2IMDSv1Disabled) + } + if cfg.EC2MetadataEnableFallback == nil && sharedCfg.EC2IMDSv1Disabled != nil { + cfg.EC2MetadataEnableFallback = aws.Bool(!*sharedCfg.EC2IMDSv1Disabled) + } + cfg.S3UseARNRegion = userCfg.S3UseARNRegion if cfg.S3UseARNRegion == nil { cfg.S3UseARNRegion = &envCfg.S3UseARNRegion diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index ea3ac0d03..f3ce8183d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -80,6 +80,9 @@ const ( // EC2 IMDS Endpoint ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint" + // ECS IMDSv1 disable fallback + ec2MetadataV1DisabledKey = "ec2_metadata_v1_disabled" + // Use DualStack Endpoint Resolution useDualStackEndpoint = "use_dualstack_endpoint" @@ -179,6 +182,12 @@ type sharedConfig struct { // ec2_metadata_service_endpoint=http://fd00:ec2::254 EC2IMDSEndpoint string + // Specifies that IMDS clients should not fallback to IMDSv1 if token + // requests fail. + // + // ec2_metadata_v1_disabled=true + EC2IMDSv1Disabled *bool + // Specifies that SDK clients must resolve a dual-stack endpoint for // services. 
// @@ -389,8 +398,15 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e updateString(&cfg.Region, section, regionKey) updateString(&cfg.CustomCABundle, section, customCABundleKey) + // we're retaining a behavioral quirk with this field that existed before + // the removal of literal parsing for (aws-sdk-go-v2/#2276): + // - if the key is missing, the config field will not be set + // - if the key is set to a non-numeric, the config field will be set to 0 if section.Has(roleDurationSecondsKey) { - d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second + var d time.Duration + if v, ok := section.Int(roleDurationSecondsKey); ok { + d = time.Duration(v) * time.Second + } cfg.AssumeRoleDuration = &d } @@ -427,6 +443,7 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e ec2MetadataServiceEndpointModeKey, file.Filename, err) } updateString(&cfg.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey) + updateBoolPtr(&cfg.EC2IMDSv1Disabled, section, ec2MetadataV1DisabledKey) updateUseDualStackEndpoint(&cfg.UseDualStackEndpoint, section, useDualStackEndpoint) @@ -668,7 +685,10 @@ func updateBool(dst *bool, section ini.Section, key string) { if !section.Has(key) { return } - *dst = section.Bool(key) + + // retains pre-(aws-sdk-go-v2#2276) behavior where non-bool value would resolve to false + v, _ := section.Bool(key) + *dst = v } // updateBoolPtr will only update the dst with the value in the section key, @@ -677,8 +697,11 @@ func updateBoolPtr(dst **bool, section ini.Section, key string) { if !section.Has(key) { return } + + // retains pre-(aws-sdk-go-v2#2276) behavior where non-bool value would resolve to false + v, _ := section.Bool(key) *dst = new(bool) - **dst = section.Bool(key) + **dst = v } // SharedConfigLoadError is an error for the shared config file failed to load. 
@@ -805,7 +828,8 @@ func updateUseDualStackEndpoint(dst *endpoints.DualStackEndpointState, section i return } - if section.Bool(key) { + // retains pre-(aws-sdk-go-v2/#2276) behavior where non-bool value would resolve to false + if v, _ := section.Bool(key); v { *dst = endpoints.DualStackEndpointStateEnabled } else { *dst = endpoints.DualStackEndpointStateDisabled @@ -821,7 +845,8 @@ func updateUseFIPSEndpoint(dst *endpoints.FIPSEndpointState, section ini.Section return } - if section.Bool(key) { + // retains pre-(aws-sdk-go-v2/#2276) behavior where non-bool value would resolve to false + if v, _ := section.Bool(key); v { *dst = endpoints.FIPSEndpointStateEnabled } else { *dst = endpoints.FIPSEndpointStateDisabled diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 6291b0bb6..6c3447fa8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.45.1" +const SDKVersion = "1.47.2" diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go index 34a481afb..b1b686086 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go @@ -154,11 +154,11 @@ func (v ValueType) String() string { // ValueType enums const ( NoneType = ValueType(iota) - DecimalType - IntegerType + DecimalType // deprecated + IntegerType // deprecated StringType QuotedStringType - BoolType + BoolType // deprecated ) // Value is a union container @@ -166,9 +166,9 @@ type Value struct { Type ValueType raw []rune - integer int64 - decimal float64 - boolean bool + integer int64 // deprecated + decimal float64 // deprecated + boolean bool // deprecated str string } @@ -253,24 +253,6 @@ func newLitToken(b []rune) (Token, int, error) { } token = newToken(TokenLit, b[:n], QuotedStringType) - } else if isNumberValue(b) { - var base int - base, n, err = getNumericalValue(b) - if err != nil { - return token, 0, err - } - - value := b[:n] - vType := IntegerType - if contains(value, '.') || hasExponent(value) { - vType = DecimalType - } - token = newToken(TokenLit, value, vType) - token.base = base - } else if isBoolValue(b) { - n, err = getBoolValue(b) - - token = newToken(TokenLit, b[:n], BoolType) } else { n, err = getValue(b) token = newToken(TokenLit, b[:n], StringType) @@ -280,18 +262,33 @@ func newLitToken(b []rune) (Token, int, error) { } // IntValue returns an integer value -func (v Value) IntValue() int64 { - return v.integer +func (v Value) IntValue() (int64, bool) { + i, err := strconv.ParseInt(string(v.raw), 0, 64) + if err != nil { + return 0, false + } + return i, true } // FloatValue returns a float value -func (v Value) FloatValue() float64 { - return v.decimal +func (v Value) FloatValue() (float64, bool) { + f, err := strconv.ParseFloat(string(v.raw), 64) + if err != nil { + return 0, false + } + return f, true } // BoolValue returns a bool value -func (v Value) BoolValue() bool { - return v.boolean +func (v Value) BoolValue() (bool, bool) { + // we don't use ParseBool as it recognizes more than what we've + // historically supported + if isCaselessLitValue(runesTrue, v.raw) { + return true, true + } else if isCaselessLitValue(runesFalse, v.raw) { + return false, true + } + return false, false } func isTrimmable(r 
rune) bool { diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go index 081cf4334..1d08e138a 100644 --- a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go +++ b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go @@ -145,17 +145,17 @@ func (t Section) ValueType(k string) (ValueType, bool) { } // Bool returns a bool value at k -func (t Section) Bool(k string) bool { +func (t Section) Bool(k string) (bool, bool) { return t.values[k].BoolValue() } // Int returns an integer value at k -func (t Section) Int(k string) int64 { +func (t Section) Int(k string) (int64, bool) { return t.values[k].IntValue() } // Float64 returns a float value at k -func (t Section) Float64(k string) float64 { +func (t Section) Float64(k string) (float64, bool) { return t.values[k].FloatValue() } diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md index b9171b88b..9cca07b55 100644 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -1,3 +1,15 @@ +# Release (2023-10-31) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.16.0 + * **Feature**: **LANG**: Bump minimum go version to 1.19. + +# Release (2023-10-06) + +## Module Highlights +* `github.com/aws/smithy-go`: v1.15.0 + * **Feature**: Add `http.WithHeaderComment` middleware. + # Release (2023-08-18) * No change notes available for this release. diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md index a4bb43fbe..c374f6928 100644 --- a/vendor/github.com/aws/smithy-go/README.md +++ b/vendor/github.com/aws/smithy-go/README.md @@ -6,6 +6,21 @@ **WARNING: All interfaces are subject to change.** +## Can I use this? + +In order to generate a usable smithy client you must provide a [protocol definition](https://github.com/aws/smithy-go/blob/main/codegen/smithy-go-codegen/src/main/java/software/amazon/smithy/go/codegen/integration/ProtocolGenerator.java), +such as [AWS restJson1](https://smithy.io/2.0/aws/protocols/aws-restjson1-protocol.html), +in order to generate transport mechanisms and serialization/deserialization +code ("serde") accordingly. + +The code generator does not currently support any protocols out of the box, +therefore the useability of this project on its own is currently limited. +Support for all [AWS protocols](https://smithy.io/2.0/aws/protocols/index.html) +exists in [aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2). We are +tracking the movement of those out of the SDK into smithy-go in +[#458](https://github.com/aws/smithy-go/issues/458), but there's currently no +timeline for doing so. + ## License This project is licensed under the Apache-2.0 License. 
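Alongside the visitor changes above, the smithy-go changelog notes a new http.WithHeaderComment middleware (its implementation follows below). A rough sketch of attaching it to a middleware stack; the standalone stack construction here is a simplified stand-in for what a generated client wires up internally:

    package main

    import (
        "log"

        "github.com/aws/smithy-go/middleware"
        smithyhttp "github.com/aws/smithy-go/transport/http"
    )

    func main() {
        stack := middleware.NewStack("example", smithyhttp.NewStackRequest)
        // Appends "(my-app/1.2.3)" to User-Agent at build time, provided the
        // header is already present when the build step runs.
        if err := smithyhttp.WithHeaderComment("User-Agent", "my-app/1.2.3")(stack); err != nil {
            log.Fatalf("attach header comment: %v", err)
        }
    }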
diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go index 997c30924..d96be806d 100644 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -3,4 +3,4 @@ package smithy // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.14.2" +const goModuleVersion = "1.16.0" diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go new file mode 100644 index 000000000..855c22720 --- /dev/null +++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go @@ -0,0 +1,81 @@ +package http + +import ( + "context" + "fmt" + "net/http" + + "github.com/aws/smithy-go/middleware" +) + +// WithHeaderComment instruments a middleware stack to append an HTTP field +// comment to the given header as specified in RFC 9110 +// (https://www.rfc-editor.org/rfc/rfc9110#name-comments). +// +// The header is case-insensitive. If the provided header exists when the +// middleware runs, the content will be inserted as-is enclosed in parentheses. +// +// Note that per the HTTP specification, comments are only allowed in fields +// containing "comment" as part of their field value definition, but this API +// will NOT verify whether the provided header is one of them. +// +// WithHeaderComment MAY be applied more than once to a middleware stack and/or +// more than once per header. +func WithHeaderComment(header, content string) func(*middleware.Stack) error { + return func(s *middleware.Stack) error { + m, err := getOrAddHeaderComment(s) + if err != nil { + return fmt.Errorf("get or add header comment: %v", err) + } + + m.values.Add(header, content) + return nil + } +} + +type headerCommentMiddleware struct { + values http.Header // hijack case-insensitive access APIs +} + +func (*headerCommentMiddleware) ID() string { + return "headerComment" +} + +func (m *headerCommentMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) ( + out middleware.BuildOutput, metadata middleware.Metadata, err error, +) { + r, ok := in.Request.(*Request) + if !ok { + return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) + } + + for h, contents := range m.values { + for _, c := range contents { + if existing := r.Header.Get(h); existing != "" { + r.Header.Set(h, fmt.Sprintf("%s (%s)", existing, c)) + } + } + } + + return next.HandleBuild(ctx, in) +} + +func getOrAddHeaderComment(s *middleware.Stack) (*headerCommentMiddleware, error) { + id := (*headerCommentMiddleware)(nil).ID() + m, ok := s.Build.Get(id) + if !ok { + m := &headerCommentMiddleware{values: http.Header{}} + if err := s.Build.Add(m, middleware.After); err != nil { + return nil, fmt.Errorf("add build: %v", err) + } + + return m, nil + } + + hc, ok := m.(*headerCommentMiddleware) + if !ok { + return nil, fmt.Errorf("existing middleware w/ id %s is not *headerCommentMiddleware", id) + } + + return hc, nil +} diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go index b48005673..42bf32aab 100644 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/md2man.go @@ -9,6 +9,8 @@ func Render(doc []byte) []byte { renderer := NewRoffRenderer() return blackfriday.Run(doc, - 
[]blackfriday.Option{blackfriday.WithRenderer(renderer), - blackfriday.WithExtensions(renderer.GetExtensions())}...) + []blackfriday.Option{ + blackfriday.WithRenderer(renderer), + blackfriday.WithExtensions(renderer.GetExtensions()), + }...) } diff --git a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go index be2b34360..4b19188d9 100644 --- a/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go +++ b/vendor/github.com/cpuguy83/go-md2man/v2/md2man/roff.go @@ -1,6 +1,7 @@ package md2man import ( + "bytes" "fmt" "io" "os" @@ -34,10 +35,10 @@ const ( hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n" linkTag = "\n\\[la]" linkCloseTag = "\\[ra]" - codespanTag = "\\fB\\fC" + codespanTag = "\\fB" codespanCloseTag = "\\fR" - codeTag = "\n.PP\n.RS\n\n.nf\n" - codeCloseTag = "\n.fi\n.RE\n" + codeTag = "\n.EX\n" + codeCloseTag = "\n.EE\n" quoteTag = "\n.PP\n.RS\n" quoteCloseTag = "\n.RE\n" listTag = "\n.RS\n" @@ -86,8 +87,7 @@ func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) { // RenderNode is called for each node in a markdown document; based on the node // type the equivalent roff output is sent to the writer func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus { - - var walkAction = blackfriday.GoToNext + walkAction := blackfriday.GoToNext switch node.Type { case blackfriday.Text: @@ -109,9 +109,16 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering out(w, strongCloseTag) } case blackfriday.Link: - if !entering { - out(w, linkTag+string(node.LinkData.Destination)+linkCloseTag) + // Don't render the link text for automatic links, because this + // will only duplicate the URL in the roff output. + // See https://daringfireball.net/projects/markdown/syntax#autolink + if !bytes.Equal(node.LinkData.Destination, node.FirstChild.Literal) { + out(w, string(node.FirstChild.Literal)) } + // Hyphens in a link must be escaped to avoid word-wrap in the rendered man page. + escapedLink := strings.ReplaceAll(string(node.LinkData.Destination), "-", "\\-") + out(w, linkTag+escapedLink+linkCloseTag) + walkAction = blackfriday.SkipChildren case blackfriday.Image: // ignore images walkAction = blackfriday.SkipChildren @@ -160,6 +167,11 @@ func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering r.handleTableCell(w, node, entering) case blackfriday.HTMLSpan: // ignore other HTML tags + case blackfriday.HTMLBlock: + if bytes.HasPrefix(node.Literal, []byte(" |Gauge, Sum, Histogram, Summary, ... | +// +------------+ +------------------------------------+ +// +// Data [One of Gauge, Sum, Histogram, Summary, ...] +// +-----------+ +// |... | // Metadata about the Data. +// |points |--+ +// +-----------+ | +// | +---------------------------+ +// | |DataPoint 1 | +// v |+------+------+ +------+ | +// +-----+ ||label |label |...|label | | +// | 1 |-->||value1|value2|...|valueN| | +// +-----+ |+------+------+ +------+ | +// | . | |+-----+ | +// | . | ||value| | +// | . | |+-----+ | +// | . | +---------------------------+ +// | . | . +// | . | . +// | . | . +// | . | +---------------------------+ +// | . 
| |DataPoint M | +// +-----+ |+------+------+ +------+ | +// | M |-->||label |label |...|label | | +// +-----+ ||value1|value2|...|valueN| | +// |+------+------+ +------+ | +// |+-----+ | +// ||value| | +// |+-----+ | +// +---------------------------+ +// +// Each distinct type of DataPoint represents the output of a specific +// aggregation function, the result of applying the DataPoint's +// associated function of to one or more measurements. +// +// All DataPoint types have three common fields: +// - Attributes includes key-value pairs associated with the data point +// - TimeUnixNano is required, set to the end time of the aggregation +// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints +// having an AggregationTemporality field, as discussed below. +// +// Both TimeUnixNano and StartTimeUnixNano values are expressed as +// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. +// +// # TimeUnixNano +// +// This field is required, having consistent interpretation across +// DataPoint types. TimeUnixNano is the moment corresponding to when +// the data point's aggregate value was captured. +// +// Data points with the 0 value for TimeUnixNano SHOULD be rejected +// by consumers. +// +// # StartTimeUnixNano +// +// StartTimeUnixNano in general allows detecting when a sequence of +// observations is unbroken. This field indicates to consumers the +// start time for points with cumulative and delta +// AggregationTemporality, and it should be included whenever possible +// to support correct rate calculation. Although it may be omitted +// when the start time is truly unknown, setting StartTimeUnixNano is +// strongly encouraged. +type Metric struct { + // name of the metric, including its DNS name prefix. It must be unique. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // description of the metric, which can be used in documentation. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // unit in which the metric value is reported. Follows the format + // described by http://unitsofmeasure.org/ucum.html. + Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"` + // Data determines the aggregation type (if any) of the metric, what is the + // reported value type for the data points, as well as the relatationship to + // the time interval over which they are reported. 
+ // + // Types that are valid to be assigned to Data: + // *Metric_Gauge + // *Metric_Sum + // *Metric_Histogram + // *Metric_ExponentialHistogram + // *Metric_Summary + Data isMetric_Data `protobuf_oneof:"data"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{3} +} +func (m *Metric) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return m.Size() +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +type isMetric_Data interface { + isMetric_Data() + MarshalTo([]byte) (int, error) + Size() int +} + +type Metric_Gauge struct { + Gauge *Gauge `protobuf:"bytes,5,opt,name=gauge,proto3,oneof" json:"gauge,omitempty"` +} +type Metric_Sum struct { + Sum *Sum `protobuf:"bytes,7,opt,name=sum,proto3,oneof" json:"sum,omitempty"` +} +type Metric_Histogram struct { + Histogram *Histogram `protobuf:"bytes,9,opt,name=histogram,proto3,oneof" json:"histogram,omitempty"` +} +type Metric_ExponentialHistogram struct { + ExponentialHistogram *ExponentialHistogram `protobuf:"bytes,10,opt,name=exponential_histogram,json=exponentialHistogram,proto3,oneof" json:"exponential_histogram,omitempty"` +} +type Metric_Summary struct { + Summary *Summary `protobuf:"bytes,11,opt,name=summary,proto3,oneof" json:"summary,omitempty"` +} + +func (*Metric_Gauge) isMetric_Data() {} +func (*Metric_Sum) isMetric_Data() {} +func (*Metric_Histogram) isMetric_Data() {} +func (*Metric_ExponentialHistogram) isMetric_Data() {} +func (*Metric_Summary) isMetric_Data() {} + +func (m *Metric) GetData() isMetric_Data { + if m != nil { + return m.Data + } + return nil +} + +func (m *Metric) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Metric) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Metric) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + +func (m *Metric) GetGauge() *Gauge { + if x, ok := m.GetData().(*Metric_Gauge); ok { + return x.Gauge + } + return nil +} + +func (m *Metric) GetSum() *Sum { + if x, ok := m.GetData().(*Metric_Sum); ok { + return x.Sum + } + return nil +} + +func (m *Metric) GetHistogram() *Histogram { + if x, ok := m.GetData().(*Metric_Histogram); ok { + return x.Histogram + } + return nil +} + +func (m *Metric) GetExponentialHistogram() *ExponentialHistogram { + if x, ok := m.GetData().(*Metric_ExponentialHistogram); ok { + return x.ExponentialHistogram + } + return nil +} + +func (m *Metric) GetSummary() *Summary { + if x, ok := m.GetData().(*Metric_Summary); ok { + return x.Summary + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*Metric) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Metric_Gauge)(nil), + (*Metric_Sum)(nil), + (*Metric_Histogram)(nil), + (*Metric_ExponentialHistogram)(nil), + (*Metric_Summary)(nil), + } +} + +// Gauge represents the type of a scalar metric that always exports the +// "current value" for every data point. It should be used for an "unknown" +// aggregation. +// +// A Gauge does not support different aggregation temporalities. Given the +// aggregation is unknown, points cannot be combined using the same +// aggregation, regardless of aggregation temporalities. Therefore, +// AggregationTemporality is not included. Consequently, this also means +// "StartTimeUnixNano" is ignored for all data points. +type Gauge struct { + DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` +} + +func (m *Gauge) Reset() { *m = Gauge{} } +func (m *Gauge) String() string { return proto.CompactTextString(m) } +func (*Gauge) ProtoMessage() {} +func (*Gauge) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{4} +} +func (m *Gauge) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Gauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_Gauge.Merge(m, src) +} +func (m *Gauge) XXX_Size() int { + return m.Size() +} +func (m *Gauge) XXX_DiscardUnknown() { + xxx_messageInfo_Gauge.DiscardUnknown(m) +} + +var xxx_messageInfo_Gauge proto.InternalMessageInfo + +func (m *Gauge) GetDataPoints() []*NumberDataPoint { + if m != nil { + return m.DataPoints + } + return nil +} + +// Sum represents the type of a scalar metric that is calculated as a sum of all +// reported measurements over a time interval. +type Sum struct { + DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. + AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` + // If "true" means that the sum is monotonic. 
+ IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"` +} + +func (m *Sum) Reset() { *m = Sum{} } +func (m *Sum) String() string { return proto.CompactTextString(m) } +func (*Sum) ProtoMessage() {} +func (*Sum) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{5} +} +func (m *Sum) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Sum.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Sum) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sum.Merge(m, src) +} +func (m *Sum) XXX_Size() int { + return m.Size() +} +func (m *Sum) XXX_DiscardUnknown() { + xxx_messageInfo_Sum.DiscardUnknown(m) +} + +var xxx_messageInfo_Sum proto.InternalMessageInfo + +func (m *Sum) GetDataPoints() []*NumberDataPoint { + if m != nil { + return m.DataPoints + } + return nil +} + +func (m *Sum) GetAggregationTemporality() AggregationTemporality { + if m != nil { + return m.AggregationTemporality + } + return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED +} + +func (m *Sum) GetIsMonotonic() bool { + if m != nil { + return m.IsMonotonic + } + return false +} + +// Histogram represents the type of a metric that is calculated by aggregating +// as a Histogram of all reported measurements over a time interval. +type Histogram struct { + DataPoints []*HistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. 
+ AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{6} +} +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(m, src) +} +func (m *Histogram) XXX_Size() int { + return m.Size() +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo + +func (m *Histogram) GetDataPoints() []*HistogramDataPoint { + if m != nil { + return m.DataPoints + } + return nil +} + +func (m *Histogram) GetAggregationTemporality() AggregationTemporality { + if m != nil { + return m.AggregationTemporality + } + return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED +} + +// ExponentialHistogram represents the type of a metric that is calculated by aggregating +// as a ExponentialHistogram of all reported double measurements over a time interval. +type ExponentialHistogram struct { + DataPoints []*ExponentialHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. 
+ AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.proto.metrics.v1.AggregationTemporality" json:"aggregation_temporality,omitempty"` +} + +func (m *ExponentialHistogram) Reset() { *m = ExponentialHistogram{} } +func (m *ExponentialHistogram) String() string { return proto.CompactTextString(m) } +func (*ExponentialHistogram) ProtoMessage() {} +func (*ExponentialHistogram) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{7} +} +func (m *ExponentialHistogram) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExponentialHistogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExponentialHistogram.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExponentialHistogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExponentialHistogram.Merge(m, src) +} +func (m *ExponentialHistogram) XXX_Size() int { + return m.Size() +} +func (m *ExponentialHistogram) XXX_DiscardUnknown() { + xxx_messageInfo_ExponentialHistogram.DiscardUnknown(m) +} + +var xxx_messageInfo_ExponentialHistogram proto.InternalMessageInfo + +func (m *ExponentialHistogram) GetDataPoints() []*ExponentialHistogramDataPoint { + if m != nil { + return m.DataPoints + } + return nil +} + +func (m *ExponentialHistogram) GetAggregationTemporality() AggregationTemporality { + if m != nil { + return m.AggregationTemporality + } + return AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED +} + +// Summary metric data are used to convey quantile summaries, +// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary) +// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45) +// data type. These data points cannot always be merged in a meaningful way. +// While they can be useful in some applications, histogram data points are +// recommended for new applications. +type Summary struct { + DataPoints []*SummaryDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} +func (*Summary) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{8} +} +func (m *Summary) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Summary.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Summary) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary.Merge(m, src) +} +func (m *Summary) XXX_Size() int { + return m.Size() +} +func (m *Summary) XXX_DiscardUnknown() { + xxx_messageInfo_Summary.DiscardUnknown(m) +} + +var xxx_messageInfo_Summary proto.InternalMessageInfo + +func (m *Summary) GetDataPoints() []*SummaryDataPoint { + if m != nil { + return m.DataPoints + } + return nil +} + +// NumberDataPoint is a single data point in a timeseries that describes the +// time-varying scalar value of a metric. 
+type NumberDataPoint struct { + // The set of key/value pairs that uniquely identify the timeseries from + // where this point belongs. The list may be empty (may contain 0 elements). + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes"` + // StartTimeUnixNano is optional but strongly encouraged, see the + // the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // TimeUnixNano is required, see the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // The value itself. A point is considered invalid when one of the recognized + // value fields is not present inside this oneof. + // + // Types that are valid to be assigned to Value: + // *NumberDataPoint_AsDouble + // *NumberDataPoint_AsInt + Value isNumberDataPoint_Value `protobuf_oneof:"value"` + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + Exemplars []Exemplar `protobuf:"bytes,5,rep,name=exemplars,proto3" json:"exemplars"` + // Flags that apply to this specific data point. See DataPointFlags + // for the available flags and their meaning. + Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"` +} + +func (m *NumberDataPoint) Reset() { *m = NumberDataPoint{} } +func (m *NumberDataPoint) String() string { return proto.CompactTextString(m) } +func (*NumberDataPoint) ProtoMessage() {} +func (*NumberDataPoint) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{9} +} +func (m *NumberDataPoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NumberDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NumberDataPoint.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NumberDataPoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_NumberDataPoint.Merge(m, src) +} +func (m *NumberDataPoint) XXX_Size() int { + return m.Size() +} +func (m *NumberDataPoint) XXX_DiscardUnknown() { + xxx_messageInfo_NumberDataPoint.DiscardUnknown(m) +} + +var xxx_messageInfo_NumberDataPoint proto.InternalMessageInfo + +type isNumberDataPoint_Value interface { + isNumberDataPoint_Value() + MarshalTo([]byte) (int, error) + Size() int +} + +type NumberDataPoint_AsDouble struct { + AsDouble float64 `protobuf:"fixed64,4,opt,name=as_double,json=asDouble,proto3,oneof" json:"as_double,omitempty"` +} +type NumberDataPoint_AsInt struct { + AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof" json:"as_int,omitempty"` +} + +func (*NumberDataPoint_AsDouble) isNumberDataPoint_Value() {} +func (*NumberDataPoint_AsInt) isNumberDataPoint_Value() {} + +func (m *NumberDataPoint) GetValue() isNumberDataPoint_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *NumberDataPoint) GetAttributes() []v11.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} 
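The Value oneof above means a NumberDataPoint carries either a double or an int, never both. An illustrative same-package sketch of reading it (this generated code lives under pdata/internal, so applications would normally go through the public pdata API instead; assumes "fmt" is imported):

    // Sketch: distinguishing the two oneof wrappers on a NumberDataPoint.
    func describePoint(dp *NumberDataPoint) string {
        switch v := dp.GetValue().(type) {
        case *NumberDataPoint_AsDouble:
            return fmt.Sprintf("double point: %v", v.AsDouble)
        case *NumberDataPoint_AsInt:
            return fmt.Sprintf("int point: %v", v.AsInt)
        default:
            return "invalid point: no recognized value field set"
        }
    }

    // e.g. describePoint(&NumberDataPoint{Value: &NumberDataPoint_AsDouble{AsDouble: 42.5}})
    // returns "double point: 42.5".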
+ +func (m *NumberDataPoint) GetStartTimeUnixNano() uint64 { + if m != nil { + return m.StartTimeUnixNano + } + return 0 +} + +func (m *NumberDataPoint) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *NumberDataPoint) GetAsDouble() float64 { + if x, ok := m.GetValue().(*NumberDataPoint_AsDouble); ok { + return x.AsDouble + } + return 0 +} + +func (m *NumberDataPoint) GetAsInt() int64 { + if x, ok := m.GetValue().(*NumberDataPoint_AsInt); ok { + return x.AsInt + } + return 0 +} + +func (m *NumberDataPoint) GetExemplars() []Exemplar { + if m != nil { + return m.Exemplars + } + return nil +} + +func (m *NumberDataPoint) GetFlags() uint32 { + if m != nil { + return m.Flags + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*NumberDataPoint) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*NumberDataPoint_AsDouble)(nil), + (*NumberDataPoint_AsInt)(nil), + } +} + +// HistogramDataPoint is a single data point in a timeseries that describes the +// time-varying values of a Histogram. A Histogram contains summary statistics +// for a population of values, it may optionally contain the distribution of +// those values across a set of buckets. +// +// If the histogram contains the distribution of values, then both +// "explicit_bounds" and "bucket counts" fields must be defined. +// If the histogram does not contain the distribution of values, then both +// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and +// "sum" are known. +type HistogramDataPoint struct { + // The set of key/value pairs that uniquely identify the timeseries from + // where this point belongs. The list may be empty (may contain 0 elements). + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes"` + // StartTimeUnixNano is optional but strongly encouraged, see the + // the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // TimeUnixNano is required, see the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // count is the number of values in the population. Must be non-negative. This + // value must be equal to the sum of the "count" fields in buckets if a + // histogram is provided. + Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"` + // sum of the values in the population. If count is zero then this field + // must be zero. + // + // Note: Sum should only be filled out when measuring non-negative discrete + // events, and is assumed to be monotonic over the values of these events. + // Negative events *can* be recorded, but sum should not be filled out when + // doing so. 
This is specifically to enforce compatibility w/ OpenMetrics, + // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram + // + // Types that are valid to be assigned to Sum_: + // *HistogramDataPoint_Sum + Sum_ isHistogramDataPoint_Sum_ `protobuf_oneof:"sum_"` + // bucket_counts is an optional field contains the count values of histogram + // for each bucket. + // + // The sum of the bucket_counts must equal the value in the count field. + // + // The number of elements in bucket_counts array must be by one greater than + // the number of elements in explicit_bounds array. + BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"` + // explicit_bounds specifies buckets with explicitly defined bounds for values. + // + // The boundaries for bucket at index i are: + // + // (-infinity, explicit_bounds[i]] for i == 0 + // (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds) + // (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds) + // + // The values in the explicit_bounds array must be strictly increasing. + // + // Histogram buckets are inclusive of their upper boundary, except the last + // bucket where the boundary is at infinity. This format is intentionally + // compatible with the OpenMetrics histogram definition. + ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"` + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + Exemplars []Exemplar `protobuf:"bytes,8,rep,name=exemplars,proto3" json:"exemplars"` + // Flags that apply to this specific data point. See DataPointFlags + // for the available flags and their meaning. + Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"` + // min is the minimum value over (start_time, end_time]. + // + // Types that are valid to be assigned to Min_: + // *HistogramDataPoint_Min + Min_ isHistogramDataPoint_Min_ `protobuf_oneof:"min_"` + // max is the maximum value over (start_time, end_time]. 
+ // + // Types that are valid to be assigned to Max_: + // *HistogramDataPoint_Max + Max_ isHistogramDataPoint_Max_ `protobuf_oneof:"max_"` +} + +func (m *HistogramDataPoint) Reset() { *m = HistogramDataPoint{} } +func (m *HistogramDataPoint) String() string { return proto.CompactTextString(m) } +func (*HistogramDataPoint) ProtoMessage() {} +func (*HistogramDataPoint) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{10} +} +func (m *HistogramDataPoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HistogramDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HistogramDataPoint.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HistogramDataPoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_HistogramDataPoint.Merge(m, src) +} +func (m *HistogramDataPoint) XXX_Size() int { + return m.Size() +} +func (m *HistogramDataPoint) XXX_DiscardUnknown() { + xxx_messageInfo_HistogramDataPoint.DiscardUnknown(m) +} + +var xxx_messageInfo_HistogramDataPoint proto.InternalMessageInfo + +type isHistogramDataPoint_Sum_ interface { + isHistogramDataPoint_Sum_() + MarshalTo([]byte) (int, error) + Size() int +} +type isHistogramDataPoint_Min_ interface { + isHistogramDataPoint_Min_() + MarshalTo([]byte) (int, error) + Size() int +} +type isHistogramDataPoint_Max_ interface { + isHistogramDataPoint_Max_() + MarshalTo([]byte) (int, error) + Size() int +} + +type HistogramDataPoint_Sum struct { + Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"` +} +type HistogramDataPoint_Min struct { + Min float64 `protobuf:"fixed64,11,opt,name=min,proto3,oneof" json:"min,omitempty"` +} +type HistogramDataPoint_Max struct { + Max float64 `protobuf:"fixed64,12,opt,name=max,proto3,oneof" json:"max,omitempty"` +} + +func (*HistogramDataPoint_Sum) isHistogramDataPoint_Sum_() {} +func (*HistogramDataPoint_Min) isHistogramDataPoint_Min_() {} +func (*HistogramDataPoint_Max) isHistogramDataPoint_Max_() {} + +func (m *HistogramDataPoint) GetSum_() isHistogramDataPoint_Sum_ { + if m != nil { + return m.Sum_ + } + return nil +} +func (m *HistogramDataPoint) GetMin_() isHistogramDataPoint_Min_ { + if m != nil { + return m.Min_ + } + return nil +} +func (m *HistogramDataPoint) GetMax_() isHistogramDataPoint_Max_ { + if m != nil { + return m.Max_ + } + return nil +} + +func (m *HistogramDataPoint) GetAttributes() []v11.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *HistogramDataPoint) GetStartTimeUnixNano() uint64 { + if m != nil { + return m.StartTimeUnixNano + } + return 0 +} + +func (m *HistogramDataPoint) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *HistogramDataPoint) GetCount() uint64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *HistogramDataPoint) GetSum() float64 { + if x, ok := m.GetSum_().(*HistogramDataPoint_Sum); ok { + return x.Sum + } + return 0 +} + +func (m *HistogramDataPoint) GetBucketCounts() []uint64 { + if m != nil { + return m.BucketCounts + } + return nil +} + +func (m *HistogramDataPoint) GetExplicitBounds() []float64 { + if m != nil { + return m.ExplicitBounds + } + return nil +} + +func (m *HistogramDataPoint) GetExemplars() []Exemplar { + if m != nil { + return m.Exemplars + } + return nil +} + +func (m *HistogramDataPoint) GetFlags() uint32 
{ + if m != nil { + return m.Flags + } + return 0 +} + +func (m *HistogramDataPoint) GetMin() float64 { + if x, ok := m.GetMin_().(*HistogramDataPoint_Min); ok { + return x.Min + } + return 0 +} + +func (m *HistogramDataPoint) GetMax() float64 { + if x, ok := m.GetMax_().(*HistogramDataPoint_Max); ok { + return x.Max + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*HistogramDataPoint) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*HistogramDataPoint_Sum)(nil), + (*HistogramDataPoint_Min)(nil), + (*HistogramDataPoint_Max)(nil), + } +} + +// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the +// time-varying values of a ExponentialHistogram of double values. A ExponentialHistogram contains +// summary statistics for a population of values, it may optionally contain the +// distribution of those values across a set of buckets. +type ExponentialHistogramDataPoint struct { + // The set of key/value pairs that uniquely identify the timeseries from + // where this point belongs. The list may be empty (may contain 0 elements). + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []v11.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes"` + // StartTimeUnixNano is optional but strongly encouraged, see the + // the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // TimeUnixNano is required, see the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // count is the number of values in the population. Must be + // non-negative. This value must be equal to the sum of the "bucket_counts" + // values in the positive and negative Buckets plus the "zero_count" field. + Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"` + // sum of the values in the population. If count is zero then this field + // must be zero. + // + // Note: Sum should only be filled out when measuring non-negative discrete + // events, and is assumed to be monotonic over the values of these events. + // Negative events *can* be recorded, but sum should not be filled out when + // doing so. This is specifically to enforce compatibility w/ OpenMetrics, + // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram + // + // Types that are valid to be assigned to Sum_: + // *ExponentialHistogramDataPoint_Sum + Sum_ isExponentialHistogramDataPoint_Sum_ `protobuf_oneof:"sum_"` + // scale describes the resolution of the histogram. Boundaries are + // located at powers of the base, where: + // + // base = (2^(2^-scale)) + // + // The histogram bucket identified by `index`, a signed integer, + // contains values that are greater than (base^index) and + // less than or equal to (base^(index+1)). + // + // The positive and negative ranges of the histogram are expressed + // separately. Negative values are mapped by their absolute value + // into the negative range using the same scale as the positive range. 
+ // + // scale is not restricted by the protocol, as the permissible + // values depend on the range of the data. + Scale int32 `protobuf:"zigzag32,6,opt,name=scale,proto3" json:"scale,omitempty"` + // zero_count is the count of values that are either exactly zero or + // within the region considered zero by the instrumentation at the + // tolerated degree of precision. This bucket stores values that + // cannot be expressed using the standard exponential formula as + // well as values that have been rounded to zero. + // + // Implementations MAY consider the zero bucket to have probability + // mass equal to (zero_count / count). + ZeroCount uint64 `protobuf:"fixed64,7,opt,name=zero_count,json=zeroCount,proto3" json:"zero_count,omitempty"` + // positive carries the positive range of exponential bucket counts. + Positive ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,8,opt,name=positive,proto3" json:"positive"` + // negative carries the negative range of exponential bucket counts. + Negative ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,9,opt,name=negative,proto3" json:"negative"` + // Flags that apply to this specific data point. See DataPointFlags + // for the available flags and their meaning. + Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"` + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + Exemplars []Exemplar `protobuf:"bytes,11,rep,name=exemplars,proto3" json:"exemplars"` + // min is the minimum value over (start_time, end_time]. + // + // Types that are valid to be assigned to Min_: + // *ExponentialHistogramDataPoint_Min + Min_ isExponentialHistogramDataPoint_Min_ `protobuf_oneof:"min_"` + // max is the maximum value over (start_time, end_time]. + // + // Types that are valid to be assigned to Max_: + // *ExponentialHistogramDataPoint_Max + Max_ isExponentialHistogramDataPoint_Max_ `protobuf_oneof:"max_"` + // ZeroThreshold may be optionally set to convey the width of the zero + // region. Where the zero region is defined as the closed interval + // [-ZeroThreshold, ZeroThreshold]. + // When ZeroThreshold is 0, zero count bucket stores values that cannot be + // expressed using the standard exponential formula as well as values that + // have been rounded to zero. 
+ ZeroThreshold float64 `protobuf:"fixed64,14,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"` +} + +func (m *ExponentialHistogramDataPoint) Reset() { *m = ExponentialHistogramDataPoint{} } +func (m *ExponentialHistogramDataPoint) String() string { return proto.CompactTextString(m) } +func (*ExponentialHistogramDataPoint) ProtoMessage() {} +func (*ExponentialHistogramDataPoint) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{11} +} +func (m *ExponentialHistogramDataPoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExponentialHistogramDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExponentialHistogramDataPoint.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExponentialHistogramDataPoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExponentialHistogramDataPoint.Merge(m, src) +} +func (m *ExponentialHistogramDataPoint) XXX_Size() int { + return m.Size() +} +func (m *ExponentialHistogramDataPoint) XXX_DiscardUnknown() { + xxx_messageInfo_ExponentialHistogramDataPoint.DiscardUnknown(m) +} + +var xxx_messageInfo_ExponentialHistogramDataPoint proto.InternalMessageInfo + +type isExponentialHistogramDataPoint_Sum_ interface { + isExponentialHistogramDataPoint_Sum_() + MarshalTo([]byte) (int, error) + Size() int +} +type isExponentialHistogramDataPoint_Min_ interface { + isExponentialHistogramDataPoint_Min_() + MarshalTo([]byte) (int, error) + Size() int +} +type isExponentialHistogramDataPoint_Max_ interface { + isExponentialHistogramDataPoint_Max_() + MarshalTo([]byte) (int, error) + Size() int +} + +type ExponentialHistogramDataPoint_Sum struct { + Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"` +} +type ExponentialHistogramDataPoint_Min struct { + Min float64 `protobuf:"fixed64,12,opt,name=min,proto3,oneof" json:"min,omitempty"` +} +type ExponentialHistogramDataPoint_Max struct { + Max float64 `protobuf:"fixed64,13,opt,name=max,proto3,oneof" json:"max,omitempty"` +} + +func (*ExponentialHistogramDataPoint_Sum) isExponentialHistogramDataPoint_Sum_() {} +func (*ExponentialHistogramDataPoint_Min) isExponentialHistogramDataPoint_Min_() {} +func (*ExponentialHistogramDataPoint_Max) isExponentialHistogramDataPoint_Max_() {} + +func (m *ExponentialHistogramDataPoint) GetSum_() isExponentialHistogramDataPoint_Sum_ { + if m != nil { + return m.Sum_ + } + return nil +} +func (m *ExponentialHistogramDataPoint) GetMin_() isExponentialHistogramDataPoint_Min_ { + if m != nil { + return m.Min_ + } + return nil +} +func (m *ExponentialHistogramDataPoint) GetMax_() isExponentialHistogramDataPoint_Max_ { + if m != nil { + return m.Max_ + } + return nil +} + +func (m *ExponentialHistogramDataPoint) GetAttributes() []v11.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *ExponentialHistogramDataPoint) GetStartTimeUnixNano() uint64 { + if m != nil { + return m.StartTimeUnixNano + } + return 0 +} + +func (m *ExponentialHistogramDataPoint) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *ExponentialHistogramDataPoint) GetCount() uint64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *ExponentialHistogramDataPoint) GetSum() float64 { + if x, ok := m.GetSum_().(*ExponentialHistogramDataPoint_Sum); ok { + 
return x.Sum + } + return 0 +} + +func (m *ExponentialHistogramDataPoint) GetScale() int32 { + if m != nil { + return m.Scale + } + return 0 +} + +func (m *ExponentialHistogramDataPoint) GetZeroCount() uint64 { + if m != nil { + return m.ZeroCount + } + return 0 +} + +func (m *ExponentialHistogramDataPoint) GetPositive() ExponentialHistogramDataPoint_Buckets { + if m != nil { + return m.Positive + } + return ExponentialHistogramDataPoint_Buckets{} +} + +func (m *ExponentialHistogramDataPoint) GetNegative() ExponentialHistogramDataPoint_Buckets { + if m != nil { + return m.Negative + } + return ExponentialHistogramDataPoint_Buckets{} +} + +func (m *ExponentialHistogramDataPoint) GetFlags() uint32 { + if m != nil { + return m.Flags + } + return 0 +} + +func (m *ExponentialHistogramDataPoint) GetExemplars() []Exemplar { + if m != nil { + return m.Exemplars + } + return nil +} + +func (m *ExponentialHistogramDataPoint) GetMin() float64 { + if x, ok := m.GetMin_().(*ExponentialHistogramDataPoint_Min); ok { + return x.Min + } + return 0 +} + +func (m *ExponentialHistogramDataPoint) GetMax() float64 { + if x, ok := m.GetMax_().(*ExponentialHistogramDataPoint_Max); ok { + return x.Max + } + return 0 +} + +func (m *ExponentialHistogramDataPoint) GetZeroThreshold() float64 { + if m != nil { + return m.ZeroThreshold + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ExponentialHistogramDataPoint) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ExponentialHistogramDataPoint_Sum)(nil), + (*ExponentialHistogramDataPoint_Min)(nil), + (*ExponentialHistogramDataPoint_Max)(nil), + } +} + +// Buckets are a set of bucket counts, encoded in a contiguous array +// of counts. +type ExponentialHistogramDataPoint_Buckets struct { + // Offset is the bucket index of the first entry in the bucket_counts array. + // + // Note: This uses a varint encoding as a simple form of compression. + Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` + // bucket_counts is an array of count values, where bucket_counts[i] carries + // the count of the bucket at index (offset+i). bucket_counts[i] is the count + // of values greater than base^(offset+i) and less than or equal to + // base^(offset+i+1). + // + // Note: By contrast, the explicit HistogramDataPoint uses + // fixed64. This field is expected to have many buckets, + // especially zeros, so uint64 has been selected to ensure + // varint encoding. 
+ BucketCounts []uint64 `protobuf:"varint,2,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"` +} + +func (m *ExponentialHistogramDataPoint_Buckets) Reset() { *m = ExponentialHistogramDataPoint_Buckets{} } +func (m *ExponentialHistogramDataPoint_Buckets) String() string { return proto.CompactTextString(m) } +func (*ExponentialHistogramDataPoint_Buckets) ProtoMessage() {} +func (*ExponentialHistogramDataPoint_Buckets) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{11, 0} +} +func (m *ExponentialHistogramDataPoint_Buckets) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExponentialHistogramDataPoint_Buckets) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExponentialHistogramDataPoint_Buckets.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ExponentialHistogramDataPoint_Buckets) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExponentialHistogramDataPoint_Buckets.Merge(m, src) +} +func (m *ExponentialHistogramDataPoint_Buckets) XXX_Size() int { + return m.Size() +} +func (m *ExponentialHistogramDataPoint_Buckets) XXX_DiscardUnknown() { + xxx_messageInfo_ExponentialHistogramDataPoint_Buckets.DiscardUnknown(m) +} + +var xxx_messageInfo_ExponentialHistogramDataPoint_Buckets proto.InternalMessageInfo + +func (m *ExponentialHistogramDataPoint_Buckets) GetOffset() int32 { + if m != nil { + return m.Offset + } + return 0 +} + +func (m *ExponentialHistogramDataPoint_Buckets) GetBucketCounts() []uint64 { + if m != nil { + return m.BucketCounts + } + return nil +} + +// SummaryDataPoint is a single data point in a timeseries that describes the +// time-varying values of a Summary metric. +type SummaryDataPoint struct { + // The set of key/value pairs that uniquely identify the timeseries from + // where this point belongs. The list may be empty (may contain 0 elements). + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []v11.KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes"` + // StartTimeUnixNano is optional but strongly encouraged, see the + // the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // TimeUnixNano is required, see the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // count is the number of values in the population. Must be non-negative. + Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"` + // sum of the values in the population. If count is zero then this field + // must be zero. + // + // Note: Sum should only be filled out when measuring non-negative discrete + // events, and is assumed to be monotonic over the values of these events. + // Negative events *can* be recorded, but sum should not be filled out when + // doing so. 
This is specifically to enforce compatibility w/ OpenMetrics, + // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary + Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"` + // (Optional) list of values at different quantiles of the distribution calculated + // from the current snapshot. The quantiles must be strictly increasing. + QuantileValues []*SummaryDataPoint_ValueAtQuantile `protobuf:"bytes,6,rep,name=quantile_values,json=quantileValues,proto3" json:"quantile_values,omitempty"` + // Flags that apply to this specific data point. See DataPointFlags + // for the available flags and their meaning. + Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"` +} + +func (m *SummaryDataPoint) Reset() { *m = SummaryDataPoint{} } +func (m *SummaryDataPoint) String() string { return proto.CompactTextString(m) } +func (*SummaryDataPoint) ProtoMessage() {} +func (*SummaryDataPoint) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{12} +} +func (m *SummaryDataPoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SummaryDataPoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SummaryDataPoint.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SummaryDataPoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_SummaryDataPoint.Merge(m, src) +} +func (m *SummaryDataPoint) XXX_Size() int { + return m.Size() +} +func (m *SummaryDataPoint) XXX_DiscardUnknown() { + xxx_messageInfo_SummaryDataPoint.DiscardUnknown(m) +} + +var xxx_messageInfo_SummaryDataPoint proto.InternalMessageInfo + +func (m *SummaryDataPoint) GetAttributes() []v11.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *SummaryDataPoint) GetStartTimeUnixNano() uint64 { + if m != nil { + return m.StartTimeUnixNano + } + return 0 +} + +func (m *SummaryDataPoint) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *SummaryDataPoint) GetCount() uint64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *SummaryDataPoint) GetSum() float64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *SummaryDataPoint) GetQuantileValues() []*SummaryDataPoint_ValueAtQuantile { + if m != nil { + return m.QuantileValues + } + return nil +} + +func (m *SummaryDataPoint) GetFlags() uint32 { + if m != nil { + return m.Flags + } + return 0 +} + +// Represents the value at a given quantile of a distribution. +// +// To record Min and Max values following conventions are used: +// - The 1.0 quantile is equivalent to the maximum value observed. +// - The 0.0 quantile is equivalent to the minimum value observed. +// +// See the following issue for more context: +// https://github.com/open-telemetry/opentelemetry-proto/issues/125 +type SummaryDataPoint_ValueAtQuantile struct { + // The quantile of a distribution. Must be in the interval + // [0.0, 1.0]. + Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"` + // The value at the given quantile of a distribution. + // + // Quantile values must NOT be negative. 
+ Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *SummaryDataPoint_ValueAtQuantile) Reset() { *m = SummaryDataPoint_ValueAtQuantile{} } +func (m *SummaryDataPoint_ValueAtQuantile) String() string { return proto.CompactTextString(m) } +func (*SummaryDataPoint_ValueAtQuantile) ProtoMessage() {} +func (*SummaryDataPoint_ValueAtQuantile) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{12, 0} +} +func (m *SummaryDataPoint_ValueAtQuantile) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SummaryDataPoint_ValueAtQuantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SummaryDataPoint_ValueAtQuantile.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SummaryDataPoint_ValueAtQuantile) XXX_Merge(src proto.Message) { + xxx_messageInfo_SummaryDataPoint_ValueAtQuantile.Merge(m, src) +} +func (m *SummaryDataPoint_ValueAtQuantile) XXX_Size() int { + return m.Size() +} +func (m *SummaryDataPoint_ValueAtQuantile) XXX_DiscardUnknown() { + xxx_messageInfo_SummaryDataPoint_ValueAtQuantile.DiscardUnknown(m) +} + +var xxx_messageInfo_SummaryDataPoint_ValueAtQuantile proto.InternalMessageInfo + +func (m *SummaryDataPoint_ValueAtQuantile) GetQuantile() float64 { + if m != nil { + return m.Quantile + } + return 0 +} + +func (m *SummaryDataPoint_ValueAtQuantile) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +// A representation of an exemplar, which is a sample input measurement. +// Exemplars also hold information about the environment when the measurement +// was recorded, for example the span and trace ID of the active span when the +// exemplar was recorded. +type Exemplar struct { + // The set of key/value pairs that were filtered out by the aggregator, but + // recorded alongside the original measurement. Only key/value pairs that were + // filtered out by the aggregator should be included + FilteredAttributes []v11.KeyValue `protobuf:"bytes,7,rep,name=filtered_attributes,json=filteredAttributes,proto3" json:"filtered_attributes"` + // time_unix_nano is the exact time when this exemplar was recorded + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // The value of the measurement that was recorded. An exemplar is + // considered invalid when one of the recognized value fields is not present + // inside this oneof. + // + // Types that are valid to be assigned to Value: + // *Exemplar_AsDouble + // *Exemplar_AsInt + Value isExemplar_Value `protobuf_oneof:"value"` + // (Optional) Span ID of the exemplar trace. + // span_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. + SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,4,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"` + // (Optional) Trace ID of the exemplar trace. + // trace_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. 
+ TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"` +} + +func (m *Exemplar) Reset() { *m = Exemplar{} } +func (m *Exemplar) String() string { return proto.CompactTextString(m) } +func (*Exemplar) ProtoMessage() {} +func (*Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_3c3112f9fa006917, []int{13} +} +func (m *Exemplar) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_Exemplar.Merge(m, src) +} +func (m *Exemplar) XXX_Size() int { + return m.Size() +} +func (m *Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_Exemplar proto.InternalMessageInfo + +type isExemplar_Value interface { + isExemplar_Value() + MarshalTo([]byte) (int, error) + Size() int +} + +type Exemplar_AsDouble struct { + AsDouble float64 `protobuf:"fixed64,3,opt,name=as_double,json=asDouble,proto3,oneof" json:"as_double,omitempty"` +} +type Exemplar_AsInt struct { + AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof" json:"as_int,omitempty"` +} + +func (*Exemplar_AsDouble) isExemplar_Value() {} +func (*Exemplar_AsInt) isExemplar_Value() {} + +func (m *Exemplar) GetValue() isExemplar_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Exemplar) GetFilteredAttributes() []v11.KeyValue { + if m != nil { + return m.FilteredAttributes + } + return nil +} + +func (m *Exemplar) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *Exemplar) GetAsDouble() float64 { + if x, ok := m.GetValue().(*Exemplar_AsDouble); ok { + return x.AsDouble + } + return 0 +} + +func (m *Exemplar) GetAsInt() int64 { + if x, ok := m.GetValue().(*Exemplar_AsInt); ok { + return x.AsInt + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*Exemplar) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Exemplar_AsDouble)(nil), + (*Exemplar_AsInt)(nil), + } +} + +func init() { + proto.RegisterEnum("opentelemetry.proto.metrics.v1.AggregationTemporality", AggregationTemporality_name, AggregationTemporality_value) + proto.RegisterEnum("opentelemetry.proto.metrics.v1.DataPointFlags", DataPointFlags_name, DataPointFlags_value) + proto.RegisterType((*MetricsData)(nil), "opentelemetry.proto.metrics.v1.MetricsData") + proto.RegisterType((*ResourceMetrics)(nil), "opentelemetry.proto.metrics.v1.ResourceMetrics") + proto.RegisterType((*ScopeMetrics)(nil), "opentelemetry.proto.metrics.v1.ScopeMetrics") + proto.RegisterType((*Metric)(nil), "opentelemetry.proto.metrics.v1.Metric") + proto.RegisterType((*Gauge)(nil), "opentelemetry.proto.metrics.v1.Gauge") + proto.RegisterType((*Sum)(nil), "opentelemetry.proto.metrics.v1.Sum") + proto.RegisterType((*Histogram)(nil), "opentelemetry.proto.metrics.v1.Histogram") + proto.RegisterType((*ExponentialHistogram)(nil), "opentelemetry.proto.metrics.v1.ExponentialHistogram") + proto.RegisterType((*Summary)(nil), "opentelemetry.proto.metrics.v1.Summary") + proto.RegisterType((*NumberDataPoint)(nil), "opentelemetry.proto.metrics.v1.NumberDataPoint") + proto.RegisterType((*HistogramDataPoint)(nil), "opentelemetry.proto.metrics.v1.HistogramDataPoint") + proto.RegisterType((*ExponentialHistogramDataPoint)(nil), "opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint") + proto.RegisterType((*ExponentialHistogramDataPoint_Buckets)(nil), "opentelemetry.proto.metrics.v1.ExponentialHistogramDataPoint.Buckets") + proto.RegisterType((*SummaryDataPoint)(nil), "opentelemetry.proto.metrics.v1.SummaryDataPoint") + proto.RegisterType((*SummaryDataPoint_ValueAtQuantile)(nil), "opentelemetry.proto.metrics.v1.SummaryDataPoint.ValueAtQuantile") + proto.RegisterType((*Exemplar)(nil), "opentelemetry.proto.metrics.v1.Exemplar") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/metrics/v1/metrics.proto", fileDescriptor_3c3112f9fa006917) +} + +var fileDescriptor_3c3112f9fa006917 = []byte{ + // 1553 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xcd, 0x4f, 0x1b, 0x49, + 0x16, 0x77, 0xfb, 0xdb, 0xcf, 0x06, 0x9c, 0x5a, 0x96, 0xb4, 0x58, 0xe1, 0x38, 0xce, 0x26, 0xb0, + 0xd9, 0xc8, 0x5e, 0xc8, 0x6a, 0x3f, 0x0e, 0x91, 0x62, 0x63, 0x03, 0x26, 0x80, 0x49, 0xd9, 0x20, + 0x25, 0x8a, 0xd2, 0x2a, 0xec, 0xc2, 0xb4, 0xd2, 0xdd, 0xe5, 0xed, 0xae, 0x46, 0xb0, 0xff, 0xc1, + 0x4a, 0x7b, 0xc8, 0xdf, 0xb1, 0xda, 0xdb, 0x9c, 0xe6, 0x36, 0xc7, 0x1c, 0x33, 0xb7, 0xd1, 0x68, + 0x14, 0xcd, 0x90, 0xc3, 0x8c, 0x34, 0xff, 0xc4, 0xa8, 0xaa, 0xbb, 0xf1, 0x07, 0x26, 0x26, 0x1f, + 0x87, 0xe4, 0xd4, 0x55, 0xaf, 0xde, 0xfb, 0xd5, 0xfb, 0xf8, 0x55, 0xbd, 0x52, 0xc3, 0x3d, 0xd6, + 0xa3, 0x16, 0xa7, 0x06, 0x35, 0x29, 0xb7, 0x4f, 0x4b, 0x3d, 0x9b, 0x71, 0x56, 0x12, 0x63, 0xbd, + 0xed, 0x94, 0x8e, 0x97, 0x83, 0x61, 0x51, 0x2e, 0xa0, 0xdc, 0x90, 0xb6, 0x27, 0x2c, 0x06, 0x2a, + 0xc7, 0xcb, 0xf3, 0xb3, 0x5d, 0xd6, 0x65, 0x1e, 0x86, 0x18, 0x79, 0x0a, 0xf3, 0x77, 0xc7, 0xed, + 0xd1, 0x66, 0xa6, 0xc9, 0x2c, 0xb1, 0x85, 0x37, 0xf2, 0x75, 0x8b, 0xe3, 0x74, 0x6d, 0xea, 0x30, + 0xd7, 0x6e, 0x53, 0xa1, 0x1d, 0x8c, 0x3d, 0xfd, 0x82, 0x0e, 0xe9, 0x6d, 0x6f, 0xff, 0x2a, 0xe1, + 0x04, 0x3d, 0x85, 0x6c, 0xa0, 0xa0, 0xf9, 0x7e, 0xa9, 0x4a, 0x3e, 0xb2, 0x94, 0x5e, 0x29, 0x15, + 0xdf, 0xed, 0x7b, 0x11, 0xfb, 0x76, 0x3e, 0x1c, 0x9e, 0xb1, 0x87, 0x05, 0x85, 0xaf, 0xc3, 0x30, + 0x33, 
0xa2, 0x84, 0xba, 0xa0, 0x76, 0x68, 0xcf, 0xa6, 0x6d, 0xc2, 0x69, 0x47, 0x73, 0xda, 0xac, + 0xd7, 0xdf, 0xf7, 0x97, 0x84, 0xdc, 0xf8, 0xde, 0xa4, 0x8d, 0x9b, 0xc2, 0x2a, 0xd8, 0x75, 0xae, + 0x0f, 0x37, 0x28, 0x47, 0x8f, 0x20, 0x19, 0xf8, 0xa3, 0x2a, 0x79, 0x65, 0x29, 0xbd, 0xf2, 0xa7, + 0xb1, 0xb8, 0xe7, 0xe9, 0x19, 0x88, 0xa8, 0x12, 0x7d, 0xf5, 0xe6, 0x46, 0x08, 0x9f, 0x03, 0xa0, + 0xc7, 0x30, 0x35, 0xec, 0x6a, 0xf8, 0x03, 0x3c, 0xcd, 0x38, 0x83, 0xfe, 0x2d, 0x00, 0x38, 0xed, + 0x23, 0x6a, 0x12, 0xcd, 0xb5, 0x0d, 0x35, 0x92, 0x57, 0x96, 0x52, 0x38, 0xe5, 0x49, 0xf6, 0x6c, + 0xa3, 0xf0, 0x8d, 0x02, 0x99, 0xa1, 0x78, 0x1a, 0x10, 0x93, 0xf6, 0x7e, 0x30, 0xf7, 0xc7, 0x6e, + 0xed, 0x33, 0xe3, 0x78, 0xb9, 0x58, 0xb7, 0x1c, 0x6e, 0xbb, 0x26, 0xb5, 0x38, 0xe1, 0x3a, 0xb3, + 0x24, 0x94, 0x1f, 0x96, 0x87, 0x83, 0x1e, 0x42, 0x62, 0x38, 0x9a, 0x3b, 0x93, 0xa2, 0xf1, 0x5c, + 0xc1, 0x81, 0xd9, 0xa4, 0x10, 0x7e, 0x8a, 0x40, 0xdc, 0x33, 0x41, 0x08, 0xa2, 0x16, 0x31, 0x3d, + 0xdf, 0x53, 0x58, 0x8e, 0x51, 0x1e, 0xd2, 0x1d, 0xea, 0xb4, 0x6d, 0xbd, 0x27, 0x1c, 0x54, 0xc3, + 0x72, 0x69, 0x50, 0x24, 0xac, 0x5c, 0x4b, 0xe7, 0x3e, 0xb2, 0x1c, 0xa3, 0x07, 0x10, 0xeb, 0x12, + 0xb7, 0x4b, 0xd5, 0x98, 0x4c, 0xc3, 0xed, 0x49, 0x3e, 0xaf, 0x0b, 0xe5, 0x8d, 0x10, 0xf6, 0xac, + 0xd0, 0xdf, 0x21, 0xe2, 0xb8, 0xa6, 0x9a, 0x90, 0xc6, 0xb7, 0x26, 0x96, 0xcf, 0x35, 0x37, 0x42, + 0x58, 0x58, 0xa0, 0x3a, 0xa4, 0x8e, 0x74, 0x87, 0xb3, 0xae, 0x4d, 0x4c, 0x35, 0xf5, 0x0e, 0x3e, + 0x0d, 0x98, 0x6f, 0x04, 0x06, 0x1b, 0x21, 0xdc, 0xb7, 0x46, 0x2f, 0xe0, 0xf7, 0xf4, 0xa4, 0xc7, + 0x2c, 0x6a, 0x71, 0x9d, 0x18, 0x5a, 0x1f, 0x16, 0x24, 0xec, 0x5f, 0x27, 0xc1, 0xd6, 0xfa, 0xc6, + 0x83, 0x3b, 0xcc, 0xd2, 0x31, 0x72, 0xb4, 0x0a, 0x09, 0xc7, 0x35, 0x4d, 0x62, 0x9f, 0xaa, 0x69, + 0x09, 0xbf, 0x78, 0x85, 0xa0, 0x85, 0xfa, 0x46, 0x08, 0x07, 0x96, 0x95, 0x38, 0x44, 0x3b, 0x84, + 0x93, 0xcd, 0x68, 0x32, 0x9a, 0x8d, 0x6d, 0x46, 0x93, 0xf1, 0x6c, 0x62, 0x33, 0x9a, 0x4c, 0x66, + 0x53, 0x85, 0x27, 0x10, 0x93, 0x19, 0x46, 0xbb, 0x90, 0x16, 0x2a, 0x5a, 0x8f, 0xe9, 0x16, 0xbf, + 0xf2, 0x15, 0xb2, 0xe3, 0x9a, 0x07, 0xd4, 0x16, 0x17, 0xd1, 0xae, 0xb0, 0xc3, 0xd0, 0x09, 0x86, + 0x4e, 0xe1, 0x57, 0x05, 0x22, 0x4d, 0xd7, 0xfc, 0xf4, 0xc8, 0x88, 0xc1, 0x75, 0xd2, 0xed, 0xda, + 0xb4, 0x2b, 0x8f, 0x86, 0xc6, 0xa9, 0xd9, 0x63, 0x36, 0x31, 0x74, 0x7e, 0x2a, 0x59, 0x38, 0xbd, + 0xf2, 0xb7, 0x49, 0xe8, 0xe5, 0xbe, 0x79, 0xab, 0x6f, 0x8d, 0xe7, 0xc8, 0x58, 0x39, 0xba, 0x09, + 0x19, 0xdd, 0xd1, 0x4c, 0x66, 0x31, 0xce, 0x2c, 0xbd, 0x2d, 0x09, 0x9d, 0xc4, 0x69, 0xdd, 0xd9, + 0x0e, 0x44, 0x85, 0x6f, 0x15, 0x48, 0xf5, 0xab, 0xd6, 0x1c, 0x17, 0xf3, 0xca, 0x95, 0xf9, 0xf6, + 0x79, 0x84, 0x5d, 0xf8, 0x59, 0x81, 0xd9, 0x71, 0x64, 0x45, 0xcf, 0xc7, 0x85, 0xf7, 0xe0, 0x43, + 0x78, 0xff, 0x99, 0x44, 0xfa, 0x0c, 0x12, 0xfe, 0xb1, 0x41, 0x8f, 0xc7, 0xc5, 0xf6, 0x97, 0x2b, + 0x1e, 0xba, 0xf1, 0x27, 0xe1, 0x2c, 0x0c, 0x33, 0x23, 0x7c, 0x46, 0xdb, 0x00, 0x84, 0x73, 0x5b, + 0x3f, 0x70, 0x39, 0x75, 0x54, 0xaf, 0x71, 0x2e, 0x4e, 0xe8, 0x09, 0x8f, 0xe8, 0xe9, 0x3e, 0x31, + 0xdc, 0xa0, 0x0f, 0x0c, 0x00, 0xa0, 0x12, 0xcc, 0x3a, 0x9c, 0xd8, 0x5c, 0xe3, 0xba, 0x49, 0x35, + 0xd7, 0xd2, 0x4f, 0x34, 0x8b, 0x58, 0x4c, 0xa6, 0x2b, 0x8e, 0xaf, 0xc9, 0xb5, 0x96, 0x6e, 0xd2, + 0x3d, 0x4b, 0x3f, 0xd9, 0x21, 0x16, 0x43, 0x7f, 0x84, 0xe9, 0x11, 0xd5, 0x88, 0x54, 0xcd, 0xf0, + 0x41, 0xad, 0x05, 0x48, 0x11, 0x47, 0xeb, 0x30, 0xf7, 0xc0, 0xa0, 0x6a, 0x34, 0xaf, 0x2c, 0x29, + 0x1b, 0x21, 0x9c, 0x24, 0x4e, 0x55, 0x4a, 0xd0, 0x75, 0x88, 0x13, 0x47, 0xd3, 0x2d, 0xae, 0xc6, + 0xf3, 0xca, 0x52, 0x56, 0x5c, 
0xd3, 0xc4, 0xa9, 0x5b, 0x1c, 0x6d, 0x41, 0x8a, 0x9e, 0x50, 0xb3, + 0x67, 0x10, 0xdb, 0x51, 0x63, 0x32, 0xb8, 0xa5, 0xc9, 0xf4, 0xf0, 0x0c, 0xfc, 0xe8, 0xfa, 0x00, + 0x68, 0x16, 0x62, 0x87, 0x06, 0xe9, 0x3a, 0x6a, 0x32, 0xaf, 0x2c, 0x4d, 0x61, 0x6f, 0x52, 0x49, + 0x40, 0xec, 0x58, 0x64, 0x63, 0x33, 0x9a, 0x54, 0xb2, 0xe1, 0xc2, 0x0f, 0x11, 0x40, 0x17, 0x69, + 0x35, 0x92, 0xe7, 0xd4, 0x67, 0x9a, 0xe7, 0x59, 0x88, 0xb5, 0x99, 0x6b, 0x71, 0x99, 0xe3, 0x38, + 0xf6, 0x26, 0x08, 0x79, 0xcd, 0x2e, 0xe6, 0xe7, 0x5d, 0xf6, 0xb1, 0x5b, 0x30, 0x75, 0xe0, 0xb6, + 0x5f, 0x50, 0xae, 0x49, 0x1d, 0x47, 0x8d, 0xe7, 0x23, 0x02, 0xce, 0x13, 0xae, 0x4a, 0x19, 0x5a, + 0x84, 0x19, 0x7a, 0xd2, 0x33, 0xf4, 0xb6, 0xce, 0xb5, 0x03, 0xe6, 0x5a, 0x1d, 0x8f, 0x61, 0x0a, + 0x9e, 0x0e, 0xc4, 0x15, 0x29, 0x1d, 0xae, 0x53, 0xf2, 0x93, 0xd5, 0x09, 0x06, 0xea, 0x24, 0xa2, + 0x30, 0x75, 0x4b, 0x76, 0x2f, 0x65, 0x43, 0xc1, 0x62, 0x22, 0x65, 0xe4, 0x44, 0xcd, 0x48, 0x59, + 0x18, 0x8b, 0x89, 0x68, 0x52, 0x8e, 0x6b, 0x6a, 0xe2, 0x6b, 0xea, 0x96, 0xf7, 0x25, 0x27, 0x9a, + 0x5f, 0xde, 0xff, 0xc4, 0x61, 0xe1, 0x9d, 0x17, 0xc8, 0x48, 0xa5, 0x95, 0x2f, 0xbe, 0xd2, 0xb3, + 0xe2, 0xc1, 0x48, 0x0c, 0x2a, 0xcf, 0xd6, 0x35, 0xec, 0x4d, 0xc4, 0x9b, 0xed, 0xdf, 0xd4, 0x66, + 0x5e, 0xf5, 0xe5, 0x3b, 0x28, 0x8e, 0x53, 0x42, 0x22, 0x4b, 0x8f, 0xba, 0x90, 0xec, 0x31, 0x47, + 0xe7, 0xfa, 0x31, 0x95, 0xa7, 0x25, 0xbd, 0x52, 0xfb, 0xa8, 0x6b, 0xb9, 0x58, 0x91, 0xbc, 0x72, + 0x82, 0x17, 0x75, 0x00, 0x2e, 0x36, 0xb2, 0xe4, 0x45, 0x7a, 0x4c, 0xfd, 0xe7, 0xd4, 0xa7, 0xdd, + 0x28, 0x00, 0xbf, 0x84, 0x54, 0x43, 0xc4, 0x4d, 0x7f, 0x2c, 0x71, 0x7d, 0x8a, 0x66, 0xc6, 0x50, + 0x74, 0x6a, 0x80, 0xa2, 0xe8, 0x36, 0x4c, 0xcb, 0xe4, 0xf3, 0x23, 0x9b, 0x3a, 0x47, 0xcc, 0xe8, + 0xa8, 0xd3, 0x62, 0x19, 0x4f, 0x09, 0x69, 0x2b, 0x10, 0xce, 0xaf, 0x41, 0xc2, 0x8f, 0x06, 0xcd, + 0x41, 0x9c, 0x1d, 0x1e, 0x3a, 0x94, 0xcb, 0xa7, 0xf3, 0x35, 0xec, 0xcf, 0x2e, 0x1e, 0x63, 0xf1, + 0x84, 0x8f, 0x0e, 0x1f, 0xe3, 0xcb, 0x4e, 0x44, 0xe1, 0xff, 0x11, 0xc8, 0x8e, 0x36, 0x9c, 0x2f, + 0xa4, 0xa1, 0x8c, 0xa7, 0x7f, 0x76, 0x80, 0xfe, 0x1e, 0xf9, 0x75, 0x98, 0xf9, 0x97, 0x4b, 0x2c, + 0xae, 0x1b, 0x54, 0x93, 0xb7, 0xbc, 0x77, 0xd1, 0xa5, 0x57, 0x1e, 0xbe, 0x6f, 0x27, 0x2e, 0xca, + 0x08, 0xcb, 0xfc, 0xb1, 0x0f, 0x87, 0xa7, 0x03, 0x60, 0xb9, 0x70, 0x49, 0x77, 0x99, 0x5f, 0x85, + 0x99, 0x11, 0x43, 0x34, 0x0f, 0xc9, 0xc0, 0x54, 0x56, 0x53, 0xc1, 0xe7, 0x73, 0x01, 0x22, 0xdd, + 0x94, 0xf9, 0x51, 0xf0, 0x50, 0x67, 0x7a, 0x19, 0x81, 0x64, 0xc0, 0x3d, 0xf4, 0x1c, 0x7e, 0x77, + 0xa8, 0x1b, 0x9c, 0xda, 0xb4, 0xa3, 0x7d, 0x6c, 0xbd, 0x50, 0x80, 0x54, 0xee, 0xd7, 0xed, 0x62, + 0x19, 0xc2, 0x93, 0xfa, 0x7a, 0xe4, 0xea, 0x7d, 0xfd, 0x09, 0x24, 0x9c, 0x1e, 0xb1, 0x34, 0xbd, + 0x23, 0x0b, 0x98, 0xa9, 0x3c, 0x14, 0x8e, 0x7c, 0xff, 0xe6, 0xc6, 0x3f, 0xba, 0x6c, 0xc4, 0x77, + 0x9d, 0x95, 0xda, 0xcc, 0x30, 0x68, 0x9b, 0x33, 0xbb, 0xd4, 0x13, 0xaf, 0xa1, 0x92, 0x6e, 0x71, + 0x6a, 0x5b, 0xc4, 0x28, 0x89, 0x59, 0xb1, 0xd9, 0x23, 0x56, 0xbd, 0x8a, 0xe3, 0x02, 0xb0, 0xde, + 0x41, 0xcf, 0x20, 0xc9, 0x6d, 0xd2, 0xa6, 0x02, 0x3b, 0x26, 0xb1, 0xcb, 0x3e, 0xf6, 0x3f, 0xdf, + 0x1f, 0xbb, 0x25, 0x90, 0xea, 0x55, 0x9c, 0x90, 0x90, 0xf5, 0xce, 0xc8, 0x63, 0xe1, 0xee, 0x7f, + 0x15, 0x98, 0x1b, 0xff, 0x44, 0x44, 0x8b, 0x70, 0xab, 0xbc, 0xbe, 0x8e, 0x6b, 0xeb, 0xe5, 0x56, + 0xbd, 0xb1, 0xa3, 0xb5, 0x6a, 0xdb, 0xbb, 0x0d, 0x5c, 0xde, 0xaa, 0xb7, 0x9e, 0x68, 0x7b, 0x3b, + 0xcd, 0xdd, 0xda, 0x6a, 0x7d, 0xad, 0x5e, 0xab, 0x66, 0x43, 0xe8, 0x26, 0x2c, 0x5c, 0xa6, 0x58, + 0xad, 0x6d, 0xb5, 0xca, 0x59, 0x05, 0xdd, 0x81, 0xc2, 
0x65, 0x2a, 0xab, 0x7b, 0xdb, 0x7b, 0x5b, + 0xe5, 0x56, 0x7d, 0xbf, 0x96, 0x0d, 0xdf, 0x7d, 0x0e, 0xd3, 0xe7, 0x7c, 0x5d, 0x93, 0xf7, 0xdb, + 0x0d, 0xf8, 0x43, 0xb5, 0xdc, 0x2a, 0x6b, 0xbb, 0x8d, 0xfa, 0x4e, 0x4b, 0x5b, 0xdb, 0x2a, 0xaf, + 0x37, 0xb5, 0x6a, 0x43, 0xdb, 0x69, 0xb4, 0xb4, 0xbd, 0x66, 0x2d, 0x1b, 0x42, 0x7f, 0x86, 0xc5, + 0x0b, 0x0a, 0x3b, 0x0d, 0x0d, 0xd7, 0x56, 0x1b, 0xb8, 0x5a, 0xab, 0x6a, 0xfb, 0xe5, 0xad, 0xbd, + 0x9a, 0xb6, 0x5d, 0x6e, 0x3e, 0xca, 0x2a, 0x95, 0xaf, 0x94, 0x57, 0x67, 0x39, 0xe5, 0xf5, 0x59, + 0x4e, 0xf9, 0xf1, 0x2c, 0xa7, 0xbc, 0x7c, 0x9b, 0x0b, 0xbd, 0x7e, 0x9b, 0x0b, 0x7d, 0xf7, 0x36, + 0x17, 0x82, 0x9b, 0x3a, 0x9b, 0x70, 0xa2, 0x2a, 0x19, 0xff, 0x17, 0xc6, 0xae, 0x58, 0xd8, 0x55, + 0x9e, 0xd6, 0xde, 0xbb, 0x1e, 0xde, 0x5f, 0xad, 0x2e, 0xb5, 0x06, 0x7e, 0xb4, 0xfd, 0x2f, 0x9c, + 0x6b, 0xf4, 0xa8, 0xd5, 0x3a, 0x07, 0x91, 0xf0, 0xfe, 0x3f, 0x0a, 0xa7, 0xb8, 0xbf, 0x7c, 0x10, + 0x97, 0x56, 0xf7, 0x7f, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x24, 0xc4, 0x73, 0x45, 0xb2, 0x13, 0x00, + 0x00, +} + +func (m *MetricsData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricsData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricsData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourceMetrics) > 0 { + for iNdEx := len(m.ResourceMetrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResourceMetrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceMetrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DeprecatedScopeMetrics) > 0 { + for iNdEx := len(m.DeprecatedScopeMetrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DeprecatedScopeMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3e + i-- + dAtA[i] = 0xc2 + } + } + if len(m.SchemaUrl) > 0 { + i -= len(m.SchemaUrl) + copy(dAtA[i:], m.SchemaUrl) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.SchemaUrl))) + i-- + dAtA[i] = 0x1a + } + if len(m.ScopeMetrics) > 0 { + for iNdEx := len(m.ScopeMetrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ScopeMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScopeMetrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, 
err + } + return dAtA[:n], nil +} + +func (m *ScopeMetrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScopeMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SchemaUrl) > 0 { + i -= len(m.SchemaUrl) + copy(dAtA[i:], m.SchemaUrl) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.SchemaUrl))) + i-- + dAtA[i] = 0x1a + } + if len(m.Metrics) > 0 { + for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Metric) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metric) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metric) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Data != nil { + { + size := m.Data.Size() + i -= size + if _, err := m.Data.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if len(m.Unit) > 0 { + i -= len(m.Unit) + copy(dAtA[i:], m.Unit) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Unit))) + i-- + dAtA[i] = 0x1a + } + if len(m.Description) > 0 { + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Metric_Gauge) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metric_Gauge) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Gauge != nil { + { + size, err := m.Gauge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Metric_Sum) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metric_Sum) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Sum != nil { + { + size, err := m.Sum.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *Metric_Histogram) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metric_Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Histogram != nil { + { + size, err := m.Histogram.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *Metric_ExponentialHistogram) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() 
+ return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metric_ExponentialHistogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExponentialHistogram != nil { + { + size, err := m.ExponentialHistogram.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *Metric_Summary) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Metric_Summary) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Summary != nil { + { + size, err := m.Summary.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *Gauge) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Gauge) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Gauge) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DataPoints) > 0 { + for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Sum) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sum) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sum) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.IsMonotonic { + i-- + if m.IsMonotonic { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.AggregationTemporality != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.AggregationTemporality)) + i-- + dAtA[i] = 0x10 + } + if len(m.DataPoints) > 0 { + for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Histogram) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Histogram) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AggregationTemporality != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.AggregationTemporality)) + i-- + dAtA[i] = 0x10 + } + if len(m.DataPoints) > 0 { + for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } 
+ i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ExponentialHistogram) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExponentialHistogram) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExponentialHistogram) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AggregationTemporality != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.AggregationTemporality)) + i-- + dAtA[i] = 0x10 + } + if len(m.DataPoints) > 0 { + for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Summary) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Summary) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Summary) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DataPoints) > 0 { + for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DataPoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NumberDataPoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NumberDataPoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NumberDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Flags != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Flags)) + i-- + dAtA[i] = 0x40 + } + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.Value != nil { + { + size := m.Value.Size() + i -= size + if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.TimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) + i-- + dAtA[i] = 0x19 + } + if m.StartTimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) + i-- + dAtA[i] = 0x11 + } + return len(dAtA) - i, nil +} + +func (m *NumberDataPoint_AsDouble) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NumberDataPoint_AsDouble) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.AsDouble)))) + i-- + dAtA[i] = 0x21 + return len(dAtA) - i, nil +} +func (m *NumberDataPoint_AsInt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NumberDataPoint_AsInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.AsInt)) + i-- + dAtA[i] = 0x31 + return len(dAtA) - i, nil +} +func (m *HistogramDataPoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HistogramDataPoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HistogramDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Max_ != nil { + { + size := m.Max_.Size() + i -= size + if _, err := m.Max_.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.Min_ != nil { + { + size := m.Min_.Size() + i -= size + if _, err := m.Min_.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.Flags != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Flags)) + i-- + dAtA[i] = 0x50 + } + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if len(m.ExplicitBounds) > 0 { + for iNdEx := len(m.ExplicitBounds) - 1; iNdEx >= 0; iNdEx-- { + f8 := math.Float64bits(float64(m.ExplicitBounds[iNdEx])) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f8)) + } + i = encodeVarintMetrics(dAtA, i, uint64(len(m.ExplicitBounds)*8)) + i-- + dAtA[i] = 0x3a + } + if len(m.BucketCounts) > 0 { + for iNdEx := len(m.BucketCounts) - 1; iNdEx >= 0; iNdEx-- { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.BucketCounts[iNdEx])) + } + i = encodeVarintMetrics(dAtA, i, uint64(len(m.BucketCounts)*8)) + i-- + dAtA[i] = 0x32 + } + if m.Sum_ != nil { + { + size := m.Sum_.Size() + i -= size + if _, err := m.Sum_.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.Count != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count)) + i-- + dAtA[i] = 0x21 + } + if m.TimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) + i-- + dAtA[i] = 0x19 + } + if m.StartTimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) + i-- + dAtA[i] = 0x11 + } + return len(dAtA) - i, nil +} + +func (m *HistogramDataPoint_Sum) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HistogramDataPoint_Sum) MarshalToSizedBuffer(dAtA []byte) (int, error) 
{ + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) + i-- + dAtA[i] = 0x29 + return len(dAtA) - i, nil +} +func (m *HistogramDataPoint_Min) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HistogramDataPoint_Min) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Min)))) + i-- + dAtA[i] = 0x59 + return len(dAtA) - i, nil +} +func (m *HistogramDataPoint_Max) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HistogramDataPoint_Max) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Max)))) + i-- + dAtA[i] = 0x61 + return len(dAtA) - i, nil +} +func (m *ExponentialHistogramDataPoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExponentialHistogramDataPoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExponentialHistogramDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ZeroThreshold != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold)))) + i-- + dAtA[i] = 0x71 + } + if m.Max_ != nil { + { + size := m.Max_.Size() + i -= size + if _, err := m.Max_.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.Min_ != nil { + { + size := m.Min_.Size() + i -= size + if _, err := m.Min_.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } + if m.Flags != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Flags)) + i-- + dAtA[i] = 0x50 + } + { + size, err := m.Negative.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + { + size, err := m.Positive.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + if m.ZeroCount != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.ZeroCount)) + i-- + dAtA[i] = 0x39 + } + if m.Scale != 0 { + i = encodeVarintMetrics(dAtA, i, uint64((uint32(m.Scale)<<1)^uint32((m.Scale>>31)))) + i-- + dAtA[i] = 0x30 + } + if m.Sum_ != nil { + { + size := m.Sum_.Size() + i -= size + if _, err := m.Sum_.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.Count != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count)) + i-- + dAtA[i] = 0x21 + } + if m.TimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) + i-- + dAtA[i] = 0x19 + } + if m.StartTimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) + i-- + dAtA[i] = 0x11 + } + if 
len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ExponentialHistogramDataPoint_Sum) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExponentialHistogramDataPoint_Sum) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) + i-- + dAtA[i] = 0x29 + return len(dAtA) - i, nil +} +func (m *ExponentialHistogramDataPoint_Min) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExponentialHistogramDataPoint_Min) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Min)))) + i-- + dAtA[i] = 0x61 + return len(dAtA) - i, nil +} +func (m *ExponentialHistogramDataPoint_Max) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExponentialHistogramDataPoint_Max) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Max)))) + i-- + dAtA[i] = 0x69 + return len(dAtA) - i, nil +} +func (m *ExponentialHistogramDataPoint_Buckets) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExponentialHistogramDataPoint_Buckets) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExponentialHistogramDataPoint_Buckets) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.BucketCounts) > 0 { + dAtA12 := make([]byte, len(m.BucketCounts)*10) + var j11 int + for _, num := range m.BucketCounts { + for num >= 1<<7 { + dAtA12[j11] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j11++ + } + dAtA12[j11] = uint8(num) + j11++ + } + i -= j11 + copy(dAtA[i:], dAtA12[:j11]) + i = encodeVarintMetrics(dAtA, i, uint64(j11)) + i-- + dAtA[i] = 0x12 + } + if m.Offset != 0 { + i = encodeVarintMetrics(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31)))) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SummaryDataPoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SummaryDataPoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SummaryDataPoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Flags != 0 { + i = encodeVarintMetrics(dAtA, i, uint64(m.Flags)) + i-- + dAtA[i] = 0x40 + } + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + 
if len(m.QuantileValues) > 0 { + for iNdEx := len(m.QuantileValues) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.QuantileValues[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.Sum != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) + i-- + dAtA[i] = 0x29 + } + if m.Count != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count)) + i-- + dAtA[i] = 0x21 + } + if m.TimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) + i-- + dAtA[i] = 0x19 + } + if m.StartTimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) + i-- + dAtA[i] = 0x11 + } + return len(dAtA) - i, nil +} + +func (m *SummaryDataPoint_ValueAtQuantile) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SummaryDataPoint_ValueAtQuantile) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SummaryDataPoint_ValueAtQuantile) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if m.Quantile != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Quantile)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Exemplar) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.FilteredAttributes) > 0 { + for iNdEx := len(m.FilteredAttributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.FilteredAttributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.Value != nil { + { + size := m.Value.Size() + i -= size + if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + { + size := m.TraceId.Size() + i -= size + if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + size := m.SpanId.Size() + i -= size + if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintMetrics(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.TimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) + i-- + dAtA[i] = 0x11 + } + return len(dAtA) - i, nil +} + +func (m *Exemplar_AsDouble) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Exemplar_AsDouble) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], 
uint64(math.Float64bits(float64(m.AsDouble)))) + i-- + dAtA[i] = 0x19 + return len(dAtA) - i, nil +} +func (m *Exemplar_AsInt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Exemplar_AsInt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.AsInt)) + i-- + dAtA[i] = 0x31 + return len(dAtA) - i, nil +} +func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { + offset -= sovMetrics(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MetricsData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceMetrics) > 0 { + for _, e := range m.ResourceMetrics { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + return n +} + +func (m *ResourceMetrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Resource.Size() + n += 1 + l + sovMetrics(uint64(l)) + if len(m.ScopeMetrics) > 0 { + for _, e := range m.ScopeMetrics { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + l = len(m.SchemaUrl) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if len(m.DeprecatedScopeMetrics) > 0 { + for _, e := range m.DeprecatedScopeMetrics { + l = e.Size() + n += 2 + l + sovMetrics(uint64(l)) + } + } + return n +} + +func (m *ScopeMetrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Scope.Size() + n += 1 + l + sovMetrics(uint64(l)) + if len(m.Metrics) > 0 { + for _, e := range m.Metrics { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + l = len(m.SchemaUrl) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + return n +} + +func (m *Metric) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + l = len(m.Unit) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } + if m.Data != nil { + n += m.Data.Size() + } + return n +} + +func (m *Metric_Gauge) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Gauge != nil { + l = m.Gauge.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + return n +} +func (m *Metric_Sum) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sum != nil { + l = m.Sum.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + return n +} +func (m *Metric_Histogram) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Histogram != nil { + l = m.Histogram.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + return n +} +func (m *Metric_ExponentialHistogram) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExponentialHistogram != nil { + l = m.ExponentialHistogram.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + return n +} +func (m *Metric_Summary) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Summary != nil { + l = m.Summary.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + return n +} +func (m *Gauge) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DataPoints) > 0 { + for _, e := range m.DataPoints { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + return n +} + +func (m *Sum) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DataPoints) > 0 { + for _, e := range 
m.DataPoints { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.AggregationTemporality != 0 { + n += 1 + sovMetrics(uint64(m.AggregationTemporality)) + } + if m.IsMonotonic { + n += 2 + } + return n +} + +func (m *Histogram) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DataPoints) > 0 { + for _, e := range m.DataPoints { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.AggregationTemporality != 0 { + n += 1 + sovMetrics(uint64(m.AggregationTemporality)) + } + return n +} + +func (m *ExponentialHistogram) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DataPoints) > 0 { + for _, e := range m.DataPoints { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.AggregationTemporality != 0 { + n += 1 + sovMetrics(uint64(m.AggregationTemporality)) + } + return n +} + +func (m *Summary) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DataPoints) > 0 { + for _, e := range m.DataPoints { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + return n +} + +func (m *NumberDataPoint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StartTimeUnixNano != 0 { + n += 9 + } + if m.TimeUnixNano != 0 { + n += 9 + } + if m.Value != nil { + n += m.Value.Size() + } + if len(m.Exemplars) > 0 { + for _, e := range m.Exemplars { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.Flags != 0 { + n += 1 + sovMetrics(uint64(m.Flags)) + } + return n +} + +func (m *NumberDataPoint_AsDouble) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *NumberDataPoint_AsInt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *HistogramDataPoint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StartTimeUnixNano != 0 { + n += 9 + } + if m.TimeUnixNano != 0 { + n += 9 + } + if m.Count != 0 { + n += 9 + } + if m.Sum_ != nil { + n += m.Sum_.Size() + } + if len(m.BucketCounts) > 0 { + n += 1 + sovMetrics(uint64(len(m.BucketCounts)*8)) + len(m.BucketCounts)*8 + } + if len(m.ExplicitBounds) > 0 { + n += 1 + sovMetrics(uint64(len(m.ExplicitBounds)*8)) + len(m.ExplicitBounds)*8 + } + if len(m.Exemplars) > 0 { + for _, e := range m.Exemplars { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.Flags != 0 { + n += 1 + sovMetrics(uint64(m.Flags)) + } + if m.Min_ != nil { + n += m.Min_.Size() + } + if m.Max_ != nil { + n += m.Max_.Size() + } + return n +} + +func (m *HistogramDataPoint_Sum) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *HistogramDataPoint_Min) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *HistogramDataPoint_Max) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *ExponentialHistogramDataPoint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.StartTimeUnixNano != 0 { + n += 9 + } + if m.TimeUnixNano != 0 { + n += 9 + } + if m.Count != 0 { + n += 9 + } + 
if m.Sum_ != nil { + n += m.Sum_.Size() + } + if m.Scale != 0 { + n += 1 + sozMetrics(uint64(m.Scale)) + } + if m.ZeroCount != 0 { + n += 9 + } + l = m.Positive.Size() + n += 1 + l + sovMetrics(uint64(l)) + l = m.Negative.Size() + n += 1 + l + sovMetrics(uint64(l)) + if m.Flags != 0 { + n += 1 + sovMetrics(uint64(m.Flags)) + } + if len(m.Exemplars) > 0 { + for _, e := range m.Exemplars { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.Min_ != nil { + n += m.Min_.Size() + } + if m.Max_ != nil { + n += m.Max_.Size() + } + if m.ZeroThreshold != 0 { + n += 9 + } + return n +} + +func (m *ExponentialHistogramDataPoint_Sum) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *ExponentialHistogramDataPoint_Min) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *ExponentialHistogramDataPoint_Max) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *ExponentialHistogramDataPoint_Buckets) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Offset != 0 { + n += 1 + sozMetrics(uint64(m.Offset)) + } + if len(m.BucketCounts) > 0 { + l = 0 + for _, e := range m.BucketCounts { + l += sovMetrics(uint64(e)) + } + n += 1 + sovMetrics(uint64(l)) + l + } + return n +} + +func (m *SummaryDataPoint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StartTimeUnixNano != 0 { + n += 9 + } + if m.TimeUnixNano != 0 { + n += 9 + } + if m.Count != 0 { + n += 9 + } + if m.Sum != 0 { + n += 9 + } + if len(m.QuantileValues) > 0 { + for _, e := range m.QuantileValues { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + if m.Flags != 0 { + n += 1 + sovMetrics(uint64(m.Flags)) + } + return n +} + +func (m *SummaryDataPoint_ValueAtQuantile) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Quantile != 0 { + n += 9 + } + if m.Value != 0 { + n += 9 + } + return n +} + +func (m *Exemplar) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TimeUnixNano != 0 { + n += 9 + } + if m.Value != nil { + n += m.Value.Size() + } + l = m.SpanId.Size() + n += 1 + l + sovMetrics(uint64(l)) + l = m.TraceId.Size() + n += 1 + l + sovMetrics(uint64(l)) + if len(m.FilteredAttributes) > 0 { + for _, e := range m.FilteredAttributes { + l = e.Size() + n += 1 + l + sovMetrics(uint64(l)) + } + } + return n +} + +func (m *Exemplar_AsDouble) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Exemplar_AsInt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} + +func sovMetrics(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMetrics(x uint64) (n int) { + return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MetricsData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricsData: wiretype end group for non-group") + } + if 
fieldNum <= 0 { + return fmt.Errorf("proto: MetricsData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceMetrics = append(m.ResourceMetrics, &ResourceMetrics{}) + if err := m.ResourceMetrics[len(m.ResourceMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScopeMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ScopeMetrics = append(m.ScopeMetrics, &ScopeMetrics{}) + if err := m.ScopeMetrics[len(m.ScopeMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchemaUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 1000: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeprecatedScopeMetrics = append(m.DeprecatedScopeMetrics, &ScopeMetrics{}) + if err := m.DeprecatedScopeMetrics[len(m.DeprecatedScopeMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScopeMetrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScopeMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScopeMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Metrics = append(m.Metrics, &Metric{}) + if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchemaUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metric) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metric: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Unit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Gauge{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &Metric_Gauge{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Sum{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &Metric_Sum{v} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Histogram{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &Metric_Histogram{v} + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExponentialHistogram", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ExponentialHistogram{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &Metric_ExponentialHistogram{v} + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Summary{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &Metric_Summary{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Gauge) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Gauge: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Gauge: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataPoints = append(m.DataPoints, &NumberDataPoint{}) + if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sum) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sum: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sum: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + 
return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataPoints = append(m.DataPoints, &NumberDataPoint{}) + if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) + } + m.AggregationTemporality = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsMonotonic", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsMonotonic = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Histogram) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Histogram: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataPoints = append(m.DataPoints, &HistogramDataPoint{}) + if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) + } + m.AggregationTemporality = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExponentialHistogram) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExponentialHistogram: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExponentialHistogram: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataPoints = append(m.DataPoints, &ExponentialHistogramDataPoint{}) + if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) + } + m.AggregationTemporality = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Summary) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Summary: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Summary: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataPoints = append(m.DataPoints, &SummaryDataPoint{}) + if err := m.DataPoints[len(m.DataPoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NumberDataPoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NumberDataPoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NumberDataPoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) + } + m.StartTimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + m.TimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field AsDouble", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = &NumberDataPoint_AsDouble{float64(math.Float64frombits(v))} + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exemplars = append(m.Exemplars, Exemplar{}) + if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field AsInt", wireType) + } + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = &NumberDataPoint_AsInt{v} + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF 
+ } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v11.KeyValue{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) + } + m.Flags = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Flags |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HistogramDataPoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HistogramDataPoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HistogramDataPoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) + } + m.StartTimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + m.TimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.Count = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Sum_ = &HistogramDataPoint_Sum{float64(math.Float64frombits(v))} + case 6: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.BucketCounts = append(m.BucketCounts, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 
{ + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.BucketCounts) == 0 { + m.BucketCounts = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.BucketCounts = append(m.BucketCounts, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType) + } + case 7: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.ExplicitBounds = append(m.ExplicitBounds, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.ExplicitBounds) == 0 { + m.ExplicitBounds = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.ExplicitBounds = append(m.ExplicitBounds, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field ExplicitBounds", wireType) + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exemplars = append(m.Exemplars, Exemplar{}) + if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v11.KeyValue{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) + } + m.Flags = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Flags |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Min_ = &HistogramDataPoint_Min{float64(math.Float64frombits(v))} + case 12: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Max_ = &HistogramDataPoint_Max{float64(math.Float64frombits(v))} + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExponentialHistogramDataPoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExponentialHistogramDataPoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExponentialHistogramDataPoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v11.KeyValue{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) + } + m.StartTimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + m.TimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + if (iNdEx + 8) > l { + return 
io.ErrUnexpectedEOF + } + m.Count = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Sum_ = &ExponentialHistogramDataPoint_Sum{float64(math.Float64frombits(v))} + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Scale", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Scale = v + case 7: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCount", wireType) + } + m.ZeroCount = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.ZeroCount = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Positive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Positive.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Negative", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Negative.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) + } + m.Flags = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Flags |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exemplars = append(m.Exemplars, Exemplar{}) + if err := 
m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Min_ = &ExponentialHistogramDataPoint_Min{float64(math.Float64frombits(v))} + case 13: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Max_ = &ExponentialHistogramDataPoint_Max{float64(math.Float64frombits(v))} + case 14: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroThreshold", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.ZeroThreshold = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExponentialHistogramDataPoint_Buckets) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Buckets: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Buckets: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Offset = v + case 2: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.BucketCounts = append(m.BucketCounts, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if 
elementCount != 0 && len(m.BucketCounts) == 0 { + m.BucketCounts = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.BucketCounts = append(m.BucketCounts, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SummaryDataPoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SummaryDataPoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SummaryDataPoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) + } + m.StartTimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + m.TimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.Count = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Sum = float64(math.Float64frombits(v)) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QuantileValues", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.QuantileValues = append(m.QuantileValues, &SummaryDataPoint_ValueAtQuantile{}) + if err := m.QuantileValues[len(m.QuantileValues)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v11.KeyValue{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) + } + m.Flags = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Flags |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SummaryDataPoint_ValueAtQuantile) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValueAtQuantile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValueAtQuantile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Quantile = float64(math.Float64frombits(v)) + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Exemplar) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: Exemplar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + m.TimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field AsDouble", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = &Exemplar_AsDouble{float64(math.Float64frombits(v))} + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field AsInt", wireType) + } + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = &Exemplar_AsInt{v} + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FilteredAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FilteredAttributes = append(m.FilteredAttributes, v11.KeyValue{}) + if err := m.FilteredAttributes[len(m.FilteredAttributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMetrics(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMetrics + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func skipMetrics(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMetrics + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMetrics + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMetrics + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMetrics + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1/resource.pb.go new file mode 100644 index 000000000..d4d1565c7 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1/resource.pb.go @@ -0,0 +1,381 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/resource/v1/resource.proto + +package v1 + +import ( + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + + v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Resource information. +type Resource struct { + // Set of attributes that describe the resource. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, then + // no attributes were dropped. 
+ DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` +} + +func (m *Resource) Reset() { *m = Resource{} } +func (m *Resource) String() string { return proto.CompactTextString(m) } +func (*Resource) ProtoMessage() {} +func (*Resource) Descriptor() ([]byte, []int) { + return fileDescriptor_446f73eacf88f3f5, []int{0} +} +func (m *Resource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Resource.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Resource) XXX_Merge(src proto.Message) { + xxx_messageInfo_Resource.Merge(m, src) +} +func (m *Resource) XXX_Size() int { + return m.Size() +} +func (m *Resource) XXX_DiscardUnknown() { + xxx_messageInfo_Resource.DiscardUnknown(m) +} + +var xxx_messageInfo_Resource proto.InternalMessageInfo + +func (m *Resource) GetAttributes() []v1.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Resource) GetDroppedAttributesCount() uint32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +func init() { + proto.RegisterType((*Resource)(nil), "opentelemetry.proto.resource.v1.Resource") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/resource/v1/resource.proto", fileDescriptor_446f73eacf88f3f5) +} + +var fileDescriptor_446f73eacf88f3f5 = []byte{ + // 302 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xcb, 0x2f, 0x48, 0xcd, + 0x2b, 0x49, 0xcd, 0x49, 0xcd, 0x4d, 0x2d, 0x29, 0xaa, 0xd4, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, + 0x2f, 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, + 0x42, 0xf2, 0x28, 0xea, 0x21, 0x82, 0x7a, 0x70, 0x35, 0x65, 0x86, 0x52, 0x22, 0xe9, 0xf9, 0xe9, + 0xf9, 0x10, 0x63, 0x40, 0x2c, 0x88, 0x0a, 0x29, 0x2d, 0x6c, 0xd6, 0x24, 0xe7, 0xe7, 0xe6, 0xe6, + 0xe7, 0x81, 0x2c, 0x81, 0xb0, 0x20, 0x6a, 0x95, 0x26, 0x33, 0x72, 0x71, 0x04, 0x41, 0x4d, 0x14, + 0xf2, 0xe5, 0xe2, 0x4a, 0x2c, 0x29, 0x29, 0xca, 0x4c, 0x2a, 0x2d, 0x49, 0x2d, 0x96, 0x60, 0x54, + 0x60, 0xd6, 0xe0, 0x36, 0x52, 0xd7, 0xc3, 0xe6, 0x08, 0xa8, 0x19, 0x65, 0x86, 0x7a, 0xde, 0xa9, + 0x95, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x4e, 0x2c, 0x27, 0xee, 0xc9, 0x33, 0x04, 0x21, 0x19, 0x20, + 0x64, 0xc1, 0x25, 0x91, 0x52, 0x94, 0x5f, 0x50, 0x90, 0x9a, 0x12, 0x8f, 0x10, 0x8d, 0x4f, 0xce, + 0x2f, 0xcd, 0x2b, 0x91, 0x60, 0x52, 0x60, 0xd4, 0xe0, 0x0d, 0x12, 0x83, 0xca, 0x3b, 0xc2, 0xa5, + 0x9d, 0x41, 0xb2, 0x4e, 0xdb, 0x19, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, + 0x23, 0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x81, + 0x4b, 0x29, 0x33, 0x5f, 0x8f, 0x40, 0xb0, 0x38, 0xf1, 0xc2, 0x7c, 0x14, 0x00, 0x92, 0x0a, 0x60, + 0x8c, 0x72, 0x4b, 0x47, 0xd7, 0x94, 0x09, 0x0a, 0x91, 0x9c, 0x9c, 0xd4, 0xe4, 0x92, 0xfc, 0x22, + 0xfd, 0x82, 0x94, 0xc4, 0x92, 0x44, 0xfd, 0xcc, 0xbc, 0x92, 0xd4, 0xa2, 0xbc, 0xc4, 0x1c, 0x7d, + 0x30, 0x0f, 0x6c, 0x6a, 0x7a, 0x6a, 0x1e, 0x72, 0xfc, 0xac, 0x62, 0x92, 0xf7, 0x2f, 0x48, 0xcd, + 0x0b, 0x81, 0x9b, 0x02, 0x36, 0x5f, 0x0f, 0x66, 0x9b, 0x5e, 0x98, 0x61, 0x12, 0x1b, 0x58, 0x9f, + 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x8b, 0xcf, 0x38, 0xeb, 0x01, 0x00, 0x00, +} + +func (m *Resource) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Resource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DroppedAttributesCount != 0 { + i = encodeVarintResource(dAtA, i, uint64(m.DroppedAttributesCount)) + i-- + dAtA[i] = 0x10 + } + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintResource(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintResource(dAtA []byte, offset int, v uint64) int { + offset -= sovResource(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Resource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovResource(uint64(l)) + } + } + if m.DroppedAttributesCount != 0 { + n += 1 + sovResource(uint64(m.DroppedAttributesCount)) + } + return n +} + +func sovResource(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozResource(x uint64) (n int) { + return sovResource(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Resource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Resource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Resource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthResource + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthResource + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v1.KeyValue{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) + } + m.DroppedAttributesCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DroppedAttributesCount |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := 
skipResource(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthResource + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipResource(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResource + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResource + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResource + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthResource + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupResource + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthResource + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthResource = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowResource = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupResource = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1/trace.pb.go new file mode 100644 index 000000000..b48ce2260 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1/trace.pb.go @@ -0,0 +1,2884 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: opentelemetry/proto/trace/v1/trace.proto + +package v1 + +import ( + encoding_binary "encoding/binary" + fmt "fmt" + io "io" + math "math" + math_bits "math/bits" + + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + + go_opentelemetry_io_collector_pdata_internal_data "go.opentelemetry.io/collector/pdata/internal/data" + v11 "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" + v1 "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// SpanKind is the type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. +type Span_SpanKind int32 + +const ( + // Unspecified. Do NOT use as default. 
+ // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. + Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0 + // Indicates that the span represents an internal operation within an application, + // as opposed to an operation happening at the boundaries. Default value. + Span_SPAN_KIND_INTERNAL Span_SpanKind = 1 + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + Span_SPAN_KIND_SERVER Span_SpanKind = 2 + // Indicates that the span describes a request to some remote service. + Span_SPAN_KIND_CLIENT Span_SpanKind = 3 + // Indicates that the span describes a producer sending a message to a broker. + // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + // between producer and consumer spans. A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. + Span_SPAN_KIND_PRODUCER Span_SpanKind = 4 + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. + Span_SPAN_KIND_CONSUMER Span_SpanKind = 5 +) + +var Span_SpanKind_name = map[int32]string{ + 0: "SPAN_KIND_UNSPECIFIED", + 1: "SPAN_KIND_INTERNAL", + 2: "SPAN_KIND_SERVER", + 3: "SPAN_KIND_CLIENT", + 4: "SPAN_KIND_PRODUCER", + 5: "SPAN_KIND_CONSUMER", +} + +var Span_SpanKind_value = map[string]int32{ + "SPAN_KIND_UNSPECIFIED": 0, + "SPAN_KIND_INTERNAL": 1, + "SPAN_KIND_SERVER": 2, + "SPAN_KIND_CLIENT": 3, + "SPAN_KIND_PRODUCER": 4, + "SPAN_KIND_CONSUMER": 5, +} + +func (x Span_SpanKind) String() string { + return proto.EnumName(Span_SpanKind_name, int32(x)) +} + +func (Span_SpanKind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{3, 0} +} + +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status +type Status_StatusCode int32 + +const ( + // The default status. + Status_STATUS_CODE_UNSET Status_StatusCode = 0 + // The Span has been validated by an Application developer or Operator to + // have completed successfully. + Status_STATUS_CODE_OK Status_StatusCode = 1 + // The Span contains an error. + Status_STATUS_CODE_ERROR Status_StatusCode = 2 +) + +var Status_StatusCode_name = map[int32]string{ + 0: "STATUS_CODE_UNSET", + 1: "STATUS_CODE_OK", + 2: "STATUS_CODE_ERROR", +} + +var Status_StatusCode_value = map[string]int32{ + "STATUS_CODE_UNSET": 0, + "STATUS_CODE_OK": 1, + "STATUS_CODE_ERROR": 2, +} + +func (x Status_StatusCode) String() string { + return proto.EnumName(Status_StatusCode_name, int32(x)) +} + +func (Status_StatusCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{4, 0} +} + +// TracesData represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type TracesData struct { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain + // one element. 
Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceSpans []*ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"` +} + +func (m *TracesData) Reset() { *m = TracesData{} } +func (m *TracesData) String() string { return proto.CompactTextString(m) } +func (*TracesData) ProtoMessage() {} +func (*TracesData) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{0} +} +func (m *TracesData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TracesData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TracesData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TracesData) XXX_Merge(src proto.Message) { + xxx_messageInfo_TracesData.Merge(m, src) +} +func (m *TracesData) XXX_Size() int { + return m.Size() +} +func (m *TracesData) XXX_DiscardUnknown() { + xxx_messageInfo_TracesData.DiscardUnknown(m) +} + +var xxx_messageInfo_TracesData proto.InternalMessageInfo + +func (m *TracesData) GetResourceSpans() []*ResourceSpans { + if m != nil { + return m.ResourceSpans + } + return nil +} + +// A collection of ScopeSpans from a Resource. +type ResourceSpans struct { + DeprecatedScopeSpans []*ScopeSpans `protobuf:"bytes,1000,rep,name=deprecated_scope_spans,json=deprecatedScopeSpans,proto3" json:"deprecated_scope_spans,omitempty"` + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"` + // A list of ScopeSpans that originate from a resource. + ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"` + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_spans" field which have their own schema_url field. 
+ SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +func (m *ResourceSpans) Reset() { *m = ResourceSpans{} } +func (m *ResourceSpans) String() string { return proto.CompactTextString(m) } +func (*ResourceSpans) ProtoMessage() {} +func (*ResourceSpans) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{1} +} +func (m *ResourceSpans) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceSpans) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceSpans.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceSpans) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceSpans.Merge(m, src) +} +func (m *ResourceSpans) XXX_Size() int { + return m.Size() +} +func (m *ResourceSpans) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceSpans.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceSpans proto.InternalMessageInfo + +func (m *ResourceSpans) GetDeprecatedScopeSpans() []*ScopeSpans { + if m != nil { + return m.DeprecatedScopeSpans + } + return nil +} + +func (m *ResourceSpans) GetResource() v1.Resource { + if m != nil { + return m.Resource + } + return v1.Resource{} +} + +func (m *ResourceSpans) GetScopeSpans() []*ScopeSpans { + if m != nil { + return m.ScopeSpans + } + return nil +} + +func (m *ResourceSpans) GetSchemaUrl() string { + if m != nil { + return m.SchemaUrl + } + return "" +} + +// A collection of Spans produced by an InstrumentationScope. +type ScopeSpans struct { + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope"` + // A list of Spans that originate from an instrumentation scope. + Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` + // This schema_url applies to all spans and span events in the "spans" field. 
+ SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +func (m *ScopeSpans) Reset() { *m = ScopeSpans{} } +func (m *ScopeSpans) String() string { return proto.CompactTextString(m) } +func (*ScopeSpans) ProtoMessage() {} +func (*ScopeSpans) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{2} +} +func (m *ScopeSpans) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScopeSpans) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ScopeSpans.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ScopeSpans) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScopeSpans.Merge(m, src) +} +func (m *ScopeSpans) XXX_Size() int { + return m.Size() +} +func (m *ScopeSpans) XXX_DiscardUnknown() { + xxx_messageInfo_ScopeSpans.DiscardUnknown(m) +} + +var xxx_messageInfo_ScopeSpans proto.InternalMessageInfo + +func (m *ScopeSpans) GetScope() v11.InstrumentationScope { + if m != nil { + return m.Scope + } + return v11.InstrumentationScope{} +} + +func (m *ScopeSpans) GetSpans() []*Span { + if m != nil { + return m.Spans + } + return nil +} + +func (m *ScopeSpans) GetSchemaUrl() string { + if m != nil { + return m.SchemaUrl + } + return "" +} + +// A Span represents a single operation performed by a single component of the system. +// +// The next available field id is 17. +type Span struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"` + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,4,opt,name=parent_span_id,json=parentSpanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"parent_span_id"` + // A description of the span's operation. 
+ // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind Span_SpanKind `protobuf:"varint,6,opt,name=kind,proto3,enum=opentelemetry.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"` + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + StartTimeUnixNano uint64 `protobuf:"fixed64,7,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // end_time_unix_nano is the end time of the span. On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + EndTimeUnixNano uint64 `protobuf:"fixed64,8,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"` + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,10,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // events is a collection of Event items. + Events []*Span_Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. 
+ DroppedEventsCount uint32 `protobuf:"varint,12,opt,name=dropped_events_count,json=droppedEventsCount,proto3" json:"dropped_events_count,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + Links []*Span_Link `protobuf:"bytes,13,rep,name=links,proto3" json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. + DroppedLinksCount uint32 `protobuf:"varint,14,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status Status `protobuf:"bytes,15,opt,name=status,proto3" json:"status"` +} + +func (m *Span) Reset() { *m = Span{} } +func (m *Span) String() string { return proto.CompactTextString(m) } +func (*Span) ProtoMessage() {} +func (*Span) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{3} +} +func (m *Span) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Span.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Span) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span.Merge(m, src) +} +func (m *Span) XXX_Size() int { + return m.Size() +} +func (m *Span) XXX_DiscardUnknown() { + xxx_messageInfo_Span.DiscardUnknown(m) +} + +var xxx_messageInfo_Span proto.InternalMessageInfo + +func (m *Span) GetTraceState() string { + if m != nil { + return m.TraceState + } + return "" +} + +func (m *Span) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Span) GetKind() Span_SpanKind { + if m != nil { + return m.Kind + } + return Span_SPAN_KIND_UNSPECIFIED +} + +func (m *Span) GetStartTimeUnixNano() uint64 { + if m != nil { + return m.StartTimeUnixNano + } + return 0 +} + +func (m *Span) GetEndTimeUnixNano() uint64 { + if m != nil { + return m.EndTimeUnixNano + } + return 0 +} + +func (m *Span) GetAttributes() []v11.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Span) GetDroppedAttributesCount() uint32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +func (m *Span) GetEvents() []*Span_Event { + if m != nil { + return m.Events + } + return nil +} + +func (m *Span) GetDroppedEventsCount() uint32 { + if m != nil { + return m.DroppedEventsCount + } + return 0 +} + +func (m *Span) GetLinks() []*Span_Link { + if m != nil { + return m.Links + } + return nil +} + +func (m *Span) GetDroppedLinksCount() uint32 { + if m != nil { + return m.DroppedLinksCount + } + return 0 +} + +func (m *Span) GetStatus() Status { + if m != nil { + return m.Status + } + return Status{} +} + +// Event is a time-stamped annotation of the span, consisting of user-supplied +// text description and key-value pairs. +type Span_Event struct { + // time_unix_nano is the time the event occurred. + TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // name of the event. + // This field is semantically required to be set to non-empty string. 
+ Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` +} + +func (m *Span_Event) Reset() { *m = Span_Event{} } +func (m *Span_Event) String() string { return proto.CompactTextString(m) } +func (*Span_Event) ProtoMessage() {} +func (*Span_Event) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{3, 0} +} +func (m *Span_Event) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Span_Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Span_Event.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Span_Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Event.Merge(m, src) +} +func (m *Span_Event) XXX_Size() int { + return m.Size() +} +func (m *Span_Event) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Event.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Event proto.InternalMessageInfo + +func (m *Span_Event) GetTimeUnixNano() uint64 { + if m != nil { + return m.TimeUnixNano + } + return 0 +} + +func (m *Span_Event) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Span_Event) GetAttributes() []v11.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Span_Event) GetDroppedAttributesCount() uint32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type Span_Link struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceId go_opentelemetry_io_collector_pdata_internal_data.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.TraceID" json:"trace_id"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanId go_opentelemetry_io_collector_pdata_internal_data.SpanID `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/pdata/internal/data.SpanID" json:"span_id"` + // The trace_state associated with the link. + TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes"` + // dropped_attributes_count is the number of dropped attributes. 
If the value is 0, + // then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` +} + +func (m *Span_Link) Reset() { *m = Span_Link{} } +func (m *Span_Link) String() string { return proto.CompactTextString(m) } +func (*Span_Link) ProtoMessage() {} +func (*Span_Link) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{3, 1} +} +func (m *Span_Link) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Span_Link) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Link.Merge(m, src) +} +func (m *Span_Link) XXX_Size() int { + return m.Size() +} +func (m *Span_Link) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Link.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Link proto.InternalMessageInfo + +func (m *Span_Link) GetTraceState() string { + if m != nil { + return m.TraceState + } + return "" +} + +func (m *Span_Link) GetAttributes() []v11.KeyValue { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Span_Link) GetDroppedAttributesCount() uint32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +type Status struct { + // A developer-facing human readable error message. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // The status code. 
+ Code Status_StatusCode `protobuf:"varint,3,opt,name=code,proto3,enum=opentelemetry.proto.trace.v1.Status_StatusCode" json:"code,omitempty"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_5c407ac9c675a601, []int{4} +} +func (m *Status) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Status.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) +} +func (m *Status) XXX_Size() int { + return m.Size() +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +func (m *Status) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *Status) GetCode() Status_StatusCode { + if m != nil { + return m.Code + } + return Status_STATUS_CODE_UNSET +} + +func init() { + proto.RegisterEnum("opentelemetry.proto.trace.v1.Span_SpanKind", Span_SpanKind_name, Span_SpanKind_value) + proto.RegisterEnum("opentelemetry.proto.trace.v1.Status_StatusCode", Status_StatusCode_name, Status_StatusCode_value) + proto.RegisterType((*TracesData)(nil), "opentelemetry.proto.trace.v1.TracesData") + proto.RegisterType((*ResourceSpans)(nil), "opentelemetry.proto.trace.v1.ResourceSpans") + proto.RegisterType((*ScopeSpans)(nil), "opentelemetry.proto.trace.v1.ScopeSpans") + proto.RegisterType((*Span)(nil), "opentelemetry.proto.trace.v1.Span") + proto.RegisterType((*Span_Event)(nil), "opentelemetry.proto.trace.v1.Span.Event") + proto.RegisterType((*Span_Link)(nil), "opentelemetry.proto.trace.v1.Span.Link") + proto.RegisterType((*Status)(nil), "opentelemetry.proto.trace.v1.Status") +} + +func init() { + proto.RegisterFile("opentelemetry/proto/trace/v1/trace.proto", fileDescriptor_5c407ac9c675a601) +} + +var fileDescriptor_5c407ac9c675a601 = []byte{ + // 1007 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x56, 0x4f, 0x6f, 0xe3, 0x44, + 0x14, 0xcf, 0xa4, 0x4e, 0xda, 0xbe, 0xb6, 0x59, 0x77, 0xe8, 0x56, 0xa6, 0x5a, 0xd2, 0x28, 0x5a, + 0x89, 0xc0, 0x4a, 0x09, 0xed, 0x5e, 0xca, 0x01, 0xb1, 0x6d, 0x62, 0x24, 0xd3, 0x6e, 0x52, 0x8d, + 0x93, 0x4a, 0x20, 0x24, 0x33, 0x1b, 0x0f, 0xc5, 0x6a, 0x32, 0xb6, 0xec, 0x49, 0xb5, 0x7b, 0xe3, + 0x23, 0x70, 0xe5, 0x13, 0x20, 0xc1, 0x9d, 0x1b, 0xf7, 0x15, 0xa7, 0x3d, 0x22, 0x0e, 0x2b, 0xd4, + 0x5e, 0xe0, 0x5b, 0xa0, 0x99, 0xb1, 0xf3, 0xa7, 0xaa, 0x92, 0xad, 0xc4, 0x5e, 0xf6, 0xd2, 0x4e, + 0xde, 0x7b, 0xbf, 0x3f, 0xef, 0xcd, 0xb3, 0x65, 0xa8, 0x85, 0x11, 0xe3, 0x82, 0x0d, 0xd8, 0x90, + 0x89, 0xf8, 0x45, 0x23, 0x8a, 0x43, 0x11, 0x36, 0x44, 0x4c, 0xfb, 0xac, 0x71, 0xb9, 0xa7, 0x0f, + 0x75, 0x15, 0xc4, 0x0f, 0x66, 0x2a, 0x75, 0xb0, 0xae, 0x0b, 0x2e, 0xf7, 0x76, 0xb6, 0xce, 0xc3, + 0xf3, 0x50, 0xa3, 0xe5, 0x49, 0xa7, 0x77, 0x3e, 0xbe, 0x8d, 0xbd, 0x1f, 0x0e, 0x87, 0x21, 0x97, + 0xf4, 0xfa, 0x94, 0xd6, 0xd6, 0x6f, 0xab, 0x8d, 0x59, 0x12, 0x8e, 0x62, 0x6d, 0x26, 0x3b, 0xeb, + 0xfa, 0xea, 0xb7, 0x00, 0x5d, 0xa9, 0x9e, 0xb4, 0xa8, 0xa0, 0x98, 0x40, 0x29, 0xcb, 0x7b, 0x49, + 0x44, 0x79, 0x62, 0xa1, 0xca, 0x52, 0x6d, 0x6d, 0xff, 0x51, 
0x7d, 0x9e, 0xed, 0x3a, 0x49, 0x31, + 0xae, 0x84, 0x90, 0x8d, 0x78, 0xfa, 0x67, 0xf5, 0xe7, 0x3c, 0x6c, 0xcc, 0x14, 0x60, 0x0f, 0xb6, + 0x7d, 0x16, 0xc5, 0xac, 0x4f, 0x05, 0xf3, 0xbd, 0xa4, 0x1f, 0x46, 0x99, 0xda, 0x3f, 0xcb, 0x4a, + 0xae, 0x36, 0x5f, 0xce, 0x95, 0x08, 0xad, 0xb5, 0x35, 0x21, 0x9a, 0x44, 0xf1, 0x31, 0xac, 0x64, + 0x1e, 0x2c, 0x54, 0x41, 0xb5, 0xb5, 0xfd, 0x8f, 0x6e, 0x65, 0x1c, 0xcf, 0x62, 0xaa, 0x87, 0x23, + 0xe3, 0xe5, 0xeb, 0xdd, 0x1c, 0x19, 0x13, 0x60, 0x07, 0xd6, 0xa6, 0x2d, 0xe6, 0xef, 0xe8, 0x10, + 0x92, 0x89, 0xaf, 0x0f, 0x00, 0x92, 0xfe, 0xf7, 0x6c, 0x48, 0xbd, 0x51, 0x3c, 0xb0, 0x96, 0x2a, + 0xa8, 0xb6, 0x4a, 0x56, 0x75, 0xa4, 0x17, 0x0f, 0xaa, 0xbf, 0x21, 0x80, 0xa9, 0x2e, 0x3a, 0x50, + 0x50, 0xd8, 0xb4, 0x85, 0xc7, 0xb7, 0x4a, 0xa6, 0x97, 0x7f, 0xb9, 0x57, 0x77, 0x78, 0x22, 0xe2, + 0xd1, 0x90, 0x71, 0x41, 0x45, 0x10, 0x72, 0x45, 0x94, 0x36, 0xa3, 0x79, 0xf0, 0x01, 0x14, 0xa6, + 0x7b, 0xa8, 0x2e, 0xe8, 0x21, 0xa2, 0x9c, 0x68, 0xc0, 0x22, 0xe3, 0x3f, 0x6c, 0x80, 0x21, 0xcb, + 0xf1, 0x37, 0xb0, 0xa2, 0xf0, 0x5e, 0xe0, 0x2b, 0xd7, 0xeb, 0x47, 0x87, 0xd2, 0xc0, 0x5f, 0xaf, + 0x77, 0x3f, 0x3d, 0x0f, 0x6f, 0xc8, 0x05, 0x72, 0x87, 0x07, 0x03, 0xd6, 0x17, 0x61, 0xdc, 0x88, + 0x7c, 0x2a, 0x68, 0x23, 0xe0, 0x82, 0xc5, 0x9c, 0x0e, 0x1a, 0xf2, 0x57, 0x5d, 0xed, 0xa5, 0xd3, + 0x22, 0xcb, 0x8a, 0xd2, 0xf1, 0xf1, 0x57, 0xb0, 0x2c, 0xed, 0x48, 0xf2, 0xbc, 0x22, 0x7f, 0x92, + 0x92, 0x1f, 0xdc, 0x9d, 0x5c, 0xda, 0x75, 0x5a, 0xa4, 0x28, 0x09, 0x1d, 0x1f, 0xef, 0xc2, 0x9a, + 0x36, 0x9e, 0x08, 0x2a, 0x58, 0xda, 0x21, 0xa8, 0x90, 0x2b, 0x23, 0xf8, 0x3b, 0x28, 0x45, 0x34, + 0x66, 0x5c, 0x78, 0x99, 0x05, 0xe3, 0x7f, 0xb2, 0xb0, 0xae, 0x79, 0x5d, 0x6d, 0x04, 0x83, 0xc1, + 0xe9, 0x90, 0x59, 0x05, 0xe5, 0x40, 0x9d, 0xf1, 0xe7, 0x60, 0x5c, 0x04, 0xdc, 0xb7, 0x8a, 0x15, + 0x54, 0x2b, 0x2d, 0x7a, 0x16, 0x25, 0x8f, 0xfa, 0x73, 0x1c, 0x70, 0x9f, 0x28, 0x20, 0x6e, 0xc0, + 0x56, 0x22, 0x68, 0x2c, 0x3c, 0x11, 0x0c, 0x99, 0x37, 0xe2, 0xc1, 0x73, 0x8f, 0x53, 0x1e, 0x5a, + 0xcb, 0x15, 0x54, 0x2b, 0x92, 0x4d, 0x95, 0xeb, 0x06, 0x43, 0xd6, 0xe3, 0xc1, 0xf3, 0x36, 0xe5, + 0x21, 0x7e, 0x04, 0x98, 0x71, 0xff, 0x66, 0xf9, 0x8a, 0x2a, 0xbf, 0xc7, 0xb8, 0x3f, 0x53, 0xfc, + 0x14, 0x80, 0x0a, 0x11, 0x07, 0xcf, 0x46, 0x82, 0x25, 0xd6, 0xaa, 0xda, 0xad, 0x0f, 0x17, 0x2c, + 0xeb, 0x31, 0x7b, 0x71, 0x46, 0x07, 0xa3, 0x6c, 0x41, 0xa7, 0x08, 0xf0, 0x01, 0x58, 0x7e, 0x1c, + 0x46, 0x11, 0xf3, 0xbd, 0x49, 0xd4, 0xeb, 0x87, 0x23, 0x2e, 0x2c, 0xa8, 0xa0, 0xda, 0x06, 0xd9, + 0x4e, 0xf3, 0x87, 0xe3, 0x74, 0x53, 0x66, 0xf1, 0x13, 0x28, 0xb2, 0x4b, 0xc6, 0x45, 0x62, 0xad, + 0xbd, 0xd1, 0x43, 0x2a, 0x27, 0x65, 0x4b, 0x00, 0x49, 0x71, 0xf8, 0x13, 0xd8, 0xca, 0xb4, 0x75, + 0x24, 0xd5, 0x5d, 0x57, 0xba, 0x38, 0xcd, 0x29, 0x4c, 0xaa, 0xf9, 0x19, 0x14, 0x06, 0x01, 0xbf, + 0x48, 0xac, 0x8d, 0x39, 0x7d, 0xcf, 0x4a, 0x9e, 0x04, 0xfc, 0x82, 0x68, 0x14, 0xae, 0xc3, 0x7b, + 0x99, 0xa0, 0x0a, 0xa4, 0x7a, 0x25, 0xa5, 0xb7, 0x99, 0xa6, 0x24, 0x20, 0x95, 0x3b, 0x82, 0xa2, + 0xdc, 0xd0, 0x51, 0x62, 0xdd, 0x53, 0x2f, 0x85, 0x87, 0x0b, 0xf4, 0x54, 0x6d, 0x3a, 0xe4, 0x14, + 0xb9, 0xf3, 0x07, 0x82, 0x82, 0x6a, 0x01, 0x3f, 0x84, 0xd2, 0x8d, 0x2b, 0x46, 0xea, 0x8a, 0xd7, + 0xc5, 0xf4, 0xfd, 0x66, 0x2b, 0x99, 0x9f, 0x5a, 0xc9, 0xd9, 0x3b, 0x5f, 0x7a, 0x9b, 0x77, 0x6e, + 0xcc, 0xbb, 0xf3, 0x9d, 0x7f, 0xf3, 0x60, 0xc8, 0xf9, 0xbc, 0xc3, 0xaf, 0x9e, 0xd9, 0x59, 0x1b, + 0x6f, 0x73, 0xd6, 0x85, 0x79, 0xb3, 0xae, 0xfe, 0x84, 0x60, 0x25, 0x7b, 0xb3, 0xe0, 0xf7, 0xe1, + 0xbe, 0x7b, 0x7a, 0xd8, 0xf6, 0x8e, 0x9d, 0x76, 0xcb, 0xeb, 0xb5, 0xdd, 0x53, 0xbb, 
0xe9, 0x7c, + 0xe1, 0xd8, 0x2d, 0x33, 0x87, 0xb7, 0x01, 0x4f, 0x52, 0x4e, 0xbb, 0x6b, 0x93, 0xf6, 0xe1, 0x89, + 0x89, 0xf0, 0x16, 0x98, 0x93, 0xb8, 0x6b, 0x93, 0x33, 0x9b, 0x98, 0xf9, 0xd9, 0x68, 0xf3, 0xc4, + 0xb1, 0xdb, 0x5d, 0x73, 0x69, 0x96, 0xe3, 0x94, 0x74, 0x5a, 0xbd, 0xa6, 0x4d, 0x4c, 0x63, 0x36, + 0xde, 0xec, 0xb4, 0xdd, 0xde, 0x53, 0x9b, 0x98, 0x85, 0xea, 0xef, 0x08, 0x8a, 0x7a, 0xdb, 0xb1, + 0x05, 0xcb, 0x43, 0x96, 0x24, 0xf4, 0x3c, 0x5b, 0xd9, 0xec, 0x27, 0x6e, 0x82, 0xd1, 0x0f, 0x7d, + 0x3d, 0xe3, 0xd2, 0x7e, 0xe3, 0x4d, 0x9e, 0x9d, 0xf4, 0x5f, 0x33, 0xf4, 0x19, 0x51, 0xe0, 0x6a, + 0x1b, 0x60, 0x12, 0xc3, 0xf7, 0x61, 0xd3, 0xed, 0x1e, 0x76, 0x7b, 0xae, 0xd7, 0xec, 0xb4, 0x6c, + 0x39, 0x08, 0xbb, 0x6b, 0xe6, 0x30, 0x86, 0xd2, 0x74, 0xb8, 0x73, 0x6c, 0xa2, 0x9b, 0xa5, 0x36, + 0x21, 0x1d, 0x62, 0xe6, 0xbf, 0x34, 0x56, 0x90, 0x99, 0x3f, 0xfa, 0x15, 0xbd, 0xbc, 0x2a, 0xa3, + 0x57, 0x57, 0x65, 0xf4, 0xf7, 0x55, 0x19, 0xfd, 0x78, 0x5d, 0xce, 0xbd, 0xba, 0x2e, 0xe7, 0xfe, + 0xbc, 0x2e, 0xe7, 0x60, 0x37, 0x08, 0xe7, 0x3a, 0x3d, 0xd2, 0x5f, 0x70, 0xa7, 0x32, 0x78, 0x8a, + 0xbe, 0x6e, 0xde, 0x79, 0x23, 0xf5, 0x57, 0xe2, 0x39, 0xe3, 0xe3, 0x4f, 0xd6, 0x5f, 0xf2, 0x0f, + 0x3a, 0x11, 0xe3, 0xdd, 0x31, 0x85, 0x22, 0xd7, 0x8f, 0x45, 0xfd, 0x6c, 0xef, 0x59, 0x51, 0x21, + 0x1e, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0xbb, 0xe9, 0x90, 0xbc, 0xf8, 0x0a, 0x00, 0x00, +} + +func (m *TracesData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TracesData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TracesData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourceSpans) > 0 { + for iNdEx := len(m.ResourceSpans) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResourceSpans) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceSpans) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceSpans) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DeprecatedScopeSpans) > 0 { + for iNdEx := len(m.DeprecatedScopeSpans) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DeprecatedScopeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3e + i-- + dAtA[i] = 0xc2 + } + } + if len(m.SchemaUrl) > 0 { + i -= len(m.SchemaUrl) + copy(dAtA[i:], m.SchemaUrl) + i = encodeVarintTrace(dAtA, i, uint64(len(m.SchemaUrl))) + i-- + dAtA[i] = 0x1a + } + if len(m.ScopeSpans) > 0 { + for iNdEx := len(m.ScopeSpans) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ScopeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, 
err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScopeSpans) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopeSpans) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScopeSpans) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SchemaUrl) > 0 { + i -= len(m.SchemaUrl) + copy(dAtA[i:], m.SchemaUrl) + i = encodeVarintTrace(dAtA, i, uint64(len(m.SchemaUrl))) + i-- + dAtA[i] = 0x1a + } + if len(m.Spans) > 0 { + for iNdEx := len(m.Spans) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Spans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Scope.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Span) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Span) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Span) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + if m.DroppedLinksCount != 0 { + i = encodeVarintTrace(dAtA, i, uint64(m.DroppedLinksCount)) + i-- + dAtA[i] = 0x70 + } + if len(m.Links) > 0 { + for iNdEx := len(m.Links) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Links[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + } + if m.DroppedEventsCount != 0 { + i = encodeVarintTrace(dAtA, i, uint64(m.DroppedEventsCount)) + i-- + dAtA[i] = 0x60 + } + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } + if m.DroppedAttributesCount != 0 { + i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount)) + i-- + dAtA[i] = 0x50 + } + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if m.EndTimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.EndTimeUnixNano)) + i-- + dAtA[i] = 0x41 + } + if m.StartTimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) + i-- + dAtA[i] = 0x39 + } + if m.Kind != 0 { + i = encodeVarintTrace(dAtA, i, uint64(m.Kind)) + i-- + dAtA[i] = 0x30 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], 
m.Name) + i = encodeVarintTrace(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x2a + } + { + size := m.ParentSpanId.Size() + i -= size + if _, err := m.ParentSpanId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.TraceState) > 0 { + i -= len(m.TraceState) + copy(dAtA[i:], m.TraceState) + i = encodeVarintTrace(dAtA, i, uint64(len(m.TraceState))) + i-- + dAtA[i] = 0x1a + } + { + size := m.SpanId.Size() + i -= size + if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size := m.TraceId.Size() + i -= size + if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Span_Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Span_Event) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Span_Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DroppedAttributesCount != 0 { + i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount)) + i-- + dAtA[i] = 0x20 + } + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintTrace(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if m.TimeUnixNano != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *Span_Link) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Span_Link) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Span_Link) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DroppedAttributesCount != 0 { + i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount)) + i-- + dAtA[i] = 0x28 + } + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.TraceState) > 0 { + i -= len(m.TraceState) + copy(dAtA[i:], m.TraceState) + i = encodeVarintTrace(dAtA, i, uint64(len(m.TraceState))) + i-- + dAtA[i] = 0x1a + } + { + size := m.SpanId.Size() + i -= size + if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size := m.TraceId.Size() + i -= size + if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTrace(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return 
len(dAtA) - i, nil +} + +func (m *Status) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Status) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Code != 0 { + i = encodeVarintTrace(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x18 + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintTrace(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func encodeVarintTrace(dAtA []byte, offset int, v uint64) int { + offset -= sovTrace(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *TracesData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceSpans) > 0 { + for _, e := range m.ResourceSpans { + l = e.Size() + n += 1 + l + sovTrace(uint64(l)) + } + } + return n +} + +func (m *ResourceSpans) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Resource.Size() + n += 1 + l + sovTrace(uint64(l)) + if len(m.ScopeSpans) > 0 { + for _, e := range m.ScopeSpans { + l = e.Size() + n += 1 + l + sovTrace(uint64(l)) + } + } + l = len(m.SchemaUrl) + if l > 0 { + n += 1 + l + sovTrace(uint64(l)) + } + if len(m.DeprecatedScopeSpans) > 0 { + for _, e := range m.DeprecatedScopeSpans { + l = e.Size() + n += 2 + l + sovTrace(uint64(l)) + } + } + return n +} + +func (m *ScopeSpans) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Scope.Size() + n += 1 + l + sovTrace(uint64(l)) + if len(m.Spans) > 0 { + for _, e := range m.Spans { + l = e.Size() + n += 1 + l + sovTrace(uint64(l)) + } + } + l = len(m.SchemaUrl) + if l > 0 { + n += 1 + l + sovTrace(uint64(l)) + } + return n +} + +func (m *Span) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.TraceId.Size() + n += 1 + l + sovTrace(uint64(l)) + l = m.SpanId.Size() + n += 1 + l + sovTrace(uint64(l)) + l = len(m.TraceState) + if l > 0 { + n += 1 + l + sovTrace(uint64(l)) + } + l = m.ParentSpanId.Size() + n += 1 + l + sovTrace(uint64(l)) + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTrace(uint64(l)) + } + if m.Kind != 0 { + n += 1 + sovTrace(uint64(m.Kind)) + } + if m.StartTimeUnixNano != 0 { + n += 9 + } + if m.EndTimeUnixNano != 0 { + n += 9 + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovTrace(uint64(l)) + } + } + if m.DroppedAttributesCount != 0 { + n += 1 + sovTrace(uint64(m.DroppedAttributesCount)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTrace(uint64(l)) + } + } + if m.DroppedEventsCount != 0 { + n += 1 + sovTrace(uint64(m.DroppedEventsCount)) + } + if len(m.Links) > 0 { + for _, e := range m.Links { + l = e.Size() + n += 1 + l + sovTrace(uint64(l)) + } + } + if m.DroppedLinksCount != 0 { + n += 1 + sovTrace(uint64(m.DroppedLinksCount)) + } + l = m.Status.Size() + n += 1 + l + sovTrace(uint64(l)) + return n +} + +func (m *Span_Event) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TimeUnixNano != 0 { + n += 9 + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTrace(uint64(l)) + } + if len(m.Attributes) > 
0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovTrace(uint64(l)) + } + } + if m.DroppedAttributesCount != 0 { + n += 1 + sovTrace(uint64(m.DroppedAttributesCount)) + } + return n +} + +func (m *Span_Link) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.TraceId.Size() + n += 1 + l + sovTrace(uint64(l)) + l = m.SpanId.Size() + n += 1 + l + sovTrace(uint64(l)) + l = len(m.TraceState) + if l > 0 { + n += 1 + l + sovTrace(uint64(l)) + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovTrace(uint64(l)) + } + } + if m.DroppedAttributesCount != 0 { + n += 1 + sovTrace(uint64(m.DroppedAttributesCount)) + } + return n +} + +func (m *Status) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTrace(uint64(l)) + } + if m.Code != 0 { + n += 1 + sovTrace(uint64(m.Code)) + } + return n +} + +func sovTrace(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTrace(x uint64) (n int) { + return sovTrace(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *TracesData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TracesData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TracesData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceSpans = append(m.ResourceSpans, &ResourceSpans{}) + if err := m.ResourceSpans[len(m.ResourceSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTrace(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTrace + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceSpans) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceSpans: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceSpans: illegal tag %d (wire type %d)", fieldNum, 
wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScopeSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ScopeSpans = append(m.ScopeSpans, &ScopeSpans{}) + if err := m.ScopeSpans[len(m.ScopeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchemaUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 1000: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedScopeSpans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeprecatedScopeSpans = append(m.DeprecatedScopeSpans, &ScopeSpans{}) + if err := m.DeprecatedScopeSpans[len(m.DeprecatedScopeSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTrace(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTrace + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScopeSpans) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScopeSpans: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScopeSpans: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Scope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Spans = append(m.Spans, &Span{}) + if err := m.Spans[len(m.Spans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchemaUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTrace(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTrace + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Span) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Span: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Span: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var 
byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TraceState = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParentSpanId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ParentSpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + m.Kind = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
m.Kind |= Span_SpanKind(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) + } + m.StartTimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 8: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field EndTimeUnixNano", wireType) + } + m.EndTimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.EndTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v11.KeyValue{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) + } + m.DroppedAttributesCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DroppedAttributesCount |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, &Span_Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedEventsCount", wireType) + } + m.DroppedEventsCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DroppedEventsCount |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Links", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Links = append(m.Links, 
&Span_Link{}) + if err := m.Links[len(m.Links)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedLinksCount", wireType) + } + m.DroppedLinksCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DroppedLinksCount |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTrace(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTrace + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Span_Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + m.TimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v11.KeyValue{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) + } + m.DroppedAttributesCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DroppedAttributesCount |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTrace(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTrace + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Span_Link) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Link: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Link: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthTrace + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TraceState = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v11.KeyValue{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) + } + m.DroppedAttributesCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DroppedAttributesCount |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTrace(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTrace + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Status) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Status: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTrace + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTrace + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTrace + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= Status_StatusCode(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTrace(dAtA[iNdEx:]) + if err != nil { + return err + 
} + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTrace + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTrace(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTrace + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTrace + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTrace + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTrace + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTrace + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTrace + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTrace = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTrace = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTrace = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/spanid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/spanid.go new file mode 100644 index 000000000..25110f8b4 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/data/spanid.go @@ -0,0 +1,79 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package data // import "go.opentelemetry.io/collector/pdata/internal/data" + +import ( + "errors" + + "github.com/gogo/protobuf/proto" +) + +const spanIDSize = 8 + +var ( + errMarshalSpanID = errors.New("marshal: invalid buffer length for SpanID") + errUnmarshalSpanID = errors.New("unmarshal: invalid SpanID length") +) + +// SpanID is a custom data type that is used for all span_id fields in OTLP +// Protobuf messages. +type SpanID [spanIDSize]byte + +var _ proto.Sizer = (*SpanID)(nil) + +// Size returns the size of the data to serialize. +func (sid SpanID) Size() int { + if sid.IsEmpty() { + return 0 + } + return spanIDSize +} + +// IsEmpty returns true if id contains at least one non-zero byte. +func (sid SpanID) IsEmpty() bool { + return sid == [spanIDSize]byte{} +} + +// MarshalTo converts trace ID into a binary representation. Called by Protobuf serialization. +func (sid SpanID) MarshalTo(data []byte) (n int, err error) { + if sid.IsEmpty() { + return 0, nil + } + + if len(data) < spanIDSize { + return 0, errMarshalSpanID + } + + return copy(data, sid[:]), nil +} + +// Unmarshal inflates this trace ID from binary representation. Called by Protobuf serialization. 
+func (sid *SpanID) Unmarshal(data []byte) error { + if len(data) == 0 { + *sid = [spanIDSize]byte{} + return nil + } + + if len(data) != spanIDSize { + return errUnmarshalSpanID + } + + copy(sid[:], data) + return nil +} + +// MarshalJSON converts SpanID into a hex string enclosed in quotes. +func (sid SpanID) MarshalJSON() ([]byte, error) { + if sid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(sid[:]) +} + +// UnmarshalJSON decodes SpanID from hex string, possibly enclosed in quotes. +// Called by Protobuf JSON deserialization. +func (sid *SpanID) UnmarshalJSON(data []byte) error { + *sid = [spanIDSize]byte{} + return unmarshalJSON(sid[:], data) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/data/traceid.go b/vendor/go.opentelemetry.io/collector/pdata/internal/data/traceid.go new file mode 100644 index 000000000..4828ee02b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/data/traceid.go @@ -0,0 +1,79 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package data // import "go.opentelemetry.io/collector/pdata/internal/data" + +import ( + "errors" + + "github.com/gogo/protobuf/proto" +) + +const traceIDSize = 16 + +var ( + errMarshalTraceID = errors.New("marshal: invalid buffer length for TraceID") + errUnmarshalTraceID = errors.New("unmarshal: invalid TraceID length") +) + +// TraceID is a custom data type that is used for all trace_id fields in OTLP +// Protobuf messages. +type TraceID [traceIDSize]byte + +var _ proto.Sizer = (*SpanID)(nil) + +// Size returns the size of the data to serialize. +func (tid TraceID) Size() int { + if tid.IsEmpty() { + return 0 + } + return traceIDSize +} + +// IsEmpty returns true if id contains at leas one non-zero byte. +func (tid TraceID) IsEmpty() bool { + return tid == [traceIDSize]byte{} +} + +// MarshalTo converts trace ID into a binary representation. Called by Protobuf serialization. +func (tid TraceID) MarshalTo(data []byte) (n int, err error) { + if tid.IsEmpty() { + return 0, nil + } + + if len(data) < traceIDSize { + return 0, errMarshalTraceID + } + + return copy(data, tid[:]), nil +} + +// Unmarshal inflates this trace ID from binary representation. Called by Protobuf serialization. +func (tid *TraceID) Unmarshal(data []byte) error { + if len(data) == 0 { + *tid = [traceIDSize]byte{} + return nil + } + + if len(data) != traceIDSize { + return errUnmarshalTraceID + } + + copy(tid[:], data) + return nil +} + +// MarshalJSON converts trace id into a hex string enclosed in quotes. +func (tid TraceID) MarshalJSON() ([]byte, error) { + if tid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(tid[:]) +} + +// UnmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. +// Called by Protobuf JSON deserialization. +func (tid *TraceID) UnmarshalJSON(data []byte) error { + *tid = [traceIDSize]byte{} + return unmarshalJSON(tid[:], data) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_byteslice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_byteslice.go new file mode 100644 index 000000000..ea3a3d95c --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_byteslice.go @@ -0,0 +1,24 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
+ +package internal + +type ByteSlice struct { + orig *[]byte + state *State +} + +func GetOrigByteSlice(ms ByteSlice) *[]byte { + return ms.orig +} + +func GetByteSliceState(ms ByteSlice) *State { + return ms.state +} + +func NewByteSlice(orig *[]byte, state *State) ByteSlice { + return ByteSlice{orig: orig, state: state} +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_float64slice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_float64slice.go new file mode 100644 index 000000000..88d6c33d7 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_float64slice.go @@ -0,0 +1,24 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +type Float64Slice struct { + orig *[]float64 + state *State +} + +func GetOrigFloat64Slice(ms Float64Slice) *[]float64 { + return ms.orig +} + +func GetFloat64SliceState(ms Float64Slice) *State { + return ms.state +} + +func NewFloat64Slice(orig *[]float64, state *State) Float64Slice { + return Float64Slice{orig: orig, state: state} +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_instrumentationscope.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_instrumentationscope.go new file mode 100644 index 000000000..89b995bc9 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_instrumentationscope.go @@ -0,0 +1,43 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +import ( + otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" +) + +type InstrumentationScope struct { + orig *otlpcommon.InstrumentationScope + state *State +} + +func GetOrigInstrumentationScope(ms InstrumentationScope) *otlpcommon.InstrumentationScope { + return ms.orig +} + +func GetInstrumentationScopeState(ms InstrumentationScope) *State { + return ms.state +} + +func NewInstrumentationScope(orig *otlpcommon.InstrumentationScope, state *State) InstrumentationScope { + return InstrumentationScope{orig: orig, state: state} +} + +func GenerateTestInstrumentationScope() InstrumentationScope { + orig := otlpcommon.InstrumentationScope{} + state := StateMutable + tv := NewInstrumentationScope(&orig, &state) + FillTestInstrumentationScope(tv) + return tv +} + +func FillTestInstrumentationScope(tv InstrumentationScope) { + tv.orig.Name = "test_name" + tv.orig.Version = "test_version" + FillTestMap(NewMap(&tv.orig.Attributes, tv.state)) + tv.orig.DroppedAttributesCount = uint32(17) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resource.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resource.go new file mode 100644 index 000000000..354b2457b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_resource.go @@ -0,0 +1,41 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
+ +package internal + +import ( + otlpresource "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1" +) + +type Resource struct { + orig *otlpresource.Resource + state *State +} + +func GetOrigResource(ms Resource) *otlpresource.Resource { + return ms.orig +} + +func GetResourceState(ms Resource) *State { + return ms.state +} + +func NewResource(orig *otlpresource.Resource, state *State) Resource { + return Resource{orig: orig, state: state} +} + +func GenerateTestResource() Resource { + orig := otlpresource.Resource{} + state := StateMutable + tv := NewResource(&orig, &state) + FillTestResource(tv) + return tv +} + +func FillTestResource(tv Resource) { + FillTestMap(NewMap(&tv.orig.Attributes, tv.state)) + tv.orig.DroppedAttributesCount = uint32(17) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_uint64slice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_uint64slice.go new file mode 100644 index 000000000..fed633ae2 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/generated_wrapper_uint64slice.go @@ -0,0 +1,24 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package internal + +type UInt64Slice struct { + orig *[]uint64 + state *State +} + +func GetOrigUInt64Slice(ms UInt64Slice) *[]uint64 { + return ms.orig +} + +func GetUInt64SliceState(ms UInt64Slice) *State { + return ms.state +} + +func NewUInt64Slice(orig *[]uint64, state *State) UInt64Slice { + return UInt64Slice{orig: orig, state: state} +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/json/attribute.go b/vendor/go.opentelemetry.io/collector/pdata/internal/json/attribute.go new file mode 100644 index 000000000..89d957a65 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/json/attribute.go @@ -0,0 +1,110 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package json // import "go.opentelemetry.io/collector/pdata/internal/json" + +import ( + "encoding/base64" + "fmt" + + jsoniter "github.com/json-iterator/go" + + otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" +) + +// ReadAttribute Unmarshal JSON data and return otlpcommon.KeyValue +func ReadAttribute(iter *jsoniter.Iterator) otlpcommon.KeyValue { + kv := otlpcommon.KeyValue{} + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "key": + kv.Key = iter.ReadString() + case "value": + ReadValue(iter, &kv.Value) + default: + iter.Skip() + } + return true + }) + return kv +} + +// ReadValue Unmarshal JSON data and return otlpcommon.AnyValue +func ReadValue(iter *jsoniter.Iterator, val *otlpcommon.AnyValue) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "stringValue", "string_value": + val.Value = &otlpcommon.AnyValue_StringValue{ + StringValue: iter.ReadString(), + } + + case "boolValue", "bool_value": + val.Value = &otlpcommon.AnyValue_BoolValue{ + BoolValue: iter.ReadBool(), + } + case "intValue", "int_value": + val.Value = &otlpcommon.AnyValue_IntValue{ + IntValue: ReadInt64(iter), + } + case "doubleValue", "double_value": + val.Value = &otlpcommon.AnyValue_DoubleValue{ + DoubleValue: ReadFloat64(iter), + } + case "bytesValue", "bytes_value": + v, err := base64.StdEncoding.DecodeString(iter.ReadString()) + if err != nil { + 
iter.ReportError("bytesValue", fmt.Sprintf("base64 decode:%v", err)) + break + } + val.Value = &otlpcommon.AnyValue_BytesValue{ + BytesValue: v, + } + case "arrayValue", "array_value": + val.Value = &otlpcommon.AnyValue_ArrayValue{ + ArrayValue: readArray(iter), + } + case "kvlistValue", "kvlist_value": + val.Value = &otlpcommon.AnyValue_KvlistValue{ + KvlistValue: readKvlistValue(iter), + } + default: + iter.Skip() + } + return true + }) +} + +func readArray(iter *jsoniter.Iterator) *otlpcommon.ArrayValue { + v := &otlpcommon.ArrayValue{} + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "values": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + v.Values = append(v.Values, otlpcommon.AnyValue{}) + ReadValue(iter, &v.Values[len(v.Values)-1]) + return true + }) + default: + iter.Skip() + } + return true + }) + return v +} + +func readKvlistValue(iter *jsoniter.Iterator) *otlpcommon.KeyValueList { + v := &otlpcommon.KeyValueList{} + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "values": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + v.Values = append(v.Values, ReadAttribute(iter)) + return true + }) + default: + iter.Skip() + } + return true + }) + return v +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/json/enum.go b/vendor/go.opentelemetry.io/collector/pdata/internal/json/enum.go new file mode 100644 index 000000000..4fbe19343 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/json/enum.go @@ -0,0 +1,29 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package json // import "go.opentelemetry.io/collector/pdata/internal/json" + +import ( + jsoniter "github.com/json-iterator/go" +) + +// ReadEnumValue returns the enum integer value representation. Accepts both enum names and enum integer values. +// See https://developers.google.com/protocol-buffers/docs/proto3#json. 
+func ReadEnumValue(iter *jsoniter.Iterator, valueMap map[string]int32) int32 { + switch iter.WhatIsNext() { + case jsoniter.NumberValue: + return iter.ReadInt32() + case jsoniter.StringValue: + val, ok := valueMap[iter.ReadString()] + // Same behavior with official protbuf JSON decoder, + // see https://github.com/open-telemetry/opentelemetry-proto-go/pull/81 + if !ok { + iter.ReportError("ReadEnumValue", "unknown string value") + return 0 + } + return val + default: + iter.ReportError("ReadEnumValue", "unsupported value type") + return 0 + } +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/json/json.go b/vendor/go.opentelemetry.io/collector/pdata/internal/json/json.go new file mode 100644 index 000000000..b77d934b6 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/json/json.go @@ -0,0 +1,22 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package json // import "go.opentelemetry.io/collector/pdata/internal/json" + +import ( + "io" + + "github.com/gogo/protobuf/jsonpb" + "github.com/gogo/protobuf/proto" +) + +var marshaler = &jsonpb.Marshaler{ + // https://github.com/open-telemetry/opentelemetry-specification/pull/2758 + EnumsAsInts: true, + // https://github.com/open-telemetry/opentelemetry-specification/pull/2829 + OrigName: false, +} + +func Marshal(out io.Writer, pb proto.Message) error { + return marshaler.Marshal(out, pb) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/json/number.go b/vendor/go.opentelemetry.io/collector/pdata/internal/json/number.go new file mode 100644 index 000000000..23830b971 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/json/number.go @@ -0,0 +1,103 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package json // import "go.opentelemetry.io/collector/pdata/internal/json" + +import ( + "strconv" + + jsoniter "github.com/json-iterator/go" +) + +// ReadInt32 unmarshalls JSON data into an int32. Accepts both numbers and strings decimal. +// See https://developers.google.com/protocol-buffers/docs/proto3#json. +func ReadInt32(iter *jsoniter.Iterator) int32 { + switch iter.WhatIsNext() { + case jsoniter.NumberValue: + return iter.ReadInt32() + case jsoniter.StringValue: + val, err := strconv.ParseInt(iter.ReadString(), 10, 32) + if err != nil { + iter.ReportError("ReadInt32", err.Error()) + return 0 + } + return int32(val) + default: + iter.ReportError("ReadInt32", "unsupported value type") + return 0 + } +} + +// ReadUint32 unmarshalls JSON data into an uint32. Accepts both numbers and strings decimal. +// See https://developers.google.com/protocol-buffers/docs/proto3#json. +func ReadUint32(iter *jsoniter.Iterator) uint32 { + switch iter.WhatIsNext() { + case jsoniter.NumberValue: + return iter.ReadUint32() + case jsoniter.StringValue: + val, err := strconv.ParseUint(iter.ReadString(), 10, 32) + if err != nil { + iter.ReportError("ReadUint32", err.Error()) + return 0 + } + return uint32(val) + default: + iter.ReportError("ReadUint32", "unsupported value type") + return 0 + } +} + +// ReadInt64 unmarshalls JSON data into an int64. Accepts both numbers and strings decimal. +// See https://developers.google.com/protocol-buffers/docs/proto3#json. 
+func ReadInt64(iter *jsoniter.Iterator) int64 { + switch iter.WhatIsNext() { + case jsoniter.NumberValue: + return iter.ReadInt64() + case jsoniter.StringValue: + val, err := strconv.ParseInt(iter.ReadString(), 10, 64) + if err != nil { + iter.ReportError("ReadInt64", err.Error()) + return 0 + } + return val + default: + iter.ReportError("ReadInt64", "unsupported value type") + return 0 + } +} + +// ReadUint64 unmarshalls JSON data into an uint64. Accepts both numbers and strings decimal. +// See https://developers.google.com/protocol-buffers/docs/proto3#json. +func ReadUint64(iter *jsoniter.Iterator) uint64 { + switch iter.WhatIsNext() { + case jsoniter.NumberValue: + return iter.ReadUint64() + case jsoniter.StringValue: + val, err := strconv.ParseUint(iter.ReadString(), 10, 64) + if err != nil { + iter.ReportError("ReadUint64", err.Error()) + return 0 + } + return val + default: + iter.ReportError("ReadUint64", "unsupported value type") + return 0 + } +} + +func ReadFloat64(iter *jsoniter.Iterator) float64 { + switch iter.WhatIsNext() { + case jsoniter.NumberValue: + return iter.ReadFloat64() + case jsoniter.StringValue: + val, err := strconv.ParseFloat(iter.ReadString(), 64) + if err != nil { + iter.ReportError("ReadUint64", err.Error()) + return 0 + } + return val + default: + iter.ReportError("ReadUint64", "unsupported value type") + return 0 + } +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/json/resource.go b/vendor/go.opentelemetry.io/collector/pdata/internal/json/resource.go new file mode 100644 index 000000000..302033bc4 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/json/resource.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package json // import "go.opentelemetry.io/collector/pdata/internal/json" + +import ( + jsoniter "github.com/json-iterator/go" + + otlpresource "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1" +) + +func ReadResource(iter *jsoniter.Iterator, resource *otlpresource.Resource) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "attributes": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + resource.Attributes = append(resource.Attributes, ReadAttribute(iter)) + return true + }) + case "droppedAttributesCount", "dropped_attributes_count": + resource.DroppedAttributesCount = ReadUint32(iter) + default: + iter.Skip() + } + return true + }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/json/scope.go b/vendor/go.opentelemetry.io/collector/pdata/internal/json/scope.go new file mode 100644 index 000000000..40ad41b15 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/json/scope.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package json // import "go.opentelemetry.io/collector/pdata/internal/json" + +import ( + jsoniter "github.com/json-iterator/go" + + otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" +) + +func ReadScope(iter *jsoniter.Iterator, scope *otlpcommon.InstrumentationScope) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "name": + scope.Name = iter.ReadString() + case "version": + scope.Version = iter.ReadString() + case "attributes": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + scope.Attributes = append(scope.Attributes, ReadAttribute(iter)) + return true + }) + case "droppedAttributesCount", 
"dropped_attributes_count": + scope.DroppedAttributesCount = ReadUint32(iter) + default: + iter.Skip() + } + return true + }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/logs.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/logs.go new file mode 100644 index 000000000..c0328a5b4 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/logs.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp" + +import ( + otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" +) + +// MigrateLogs implements any translation needed due to deprecation in OTLP logs protocol. +// Any plog.Unmarshaler implementation from OTLP (proto/json) MUST call this, and the gRPC Server implementation. +func MigrateLogs(rls []*otlplogs.ResourceLogs) { + for _, rl := range rls { + if len(rl.ScopeLogs) == 0 { + rl.ScopeLogs = rl.DeprecatedScopeLogs + } + rl.DeprecatedScopeLogs = nil + } +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/metrics.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/metrics.go new file mode 100644 index 000000000..9a7da1486 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/metrics.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp" + +import ( + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" +) + +// MigrateMetrics implements any translation needed due to deprecation in OTLP metrics protocol. +// Any pmetric.Unmarshaler implementation from OTLP (proto/json) MUST call this, and the gRPC Server implementation. +func MigrateMetrics(rms []*otlpmetrics.ResourceMetrics) { + for _, rm := range rms { + if len(rm.ScopeMetrics) == 0 { + rm.ScopeMetrics = rm.DeprecatedScopeMetrics + } + rm.DeprecatedScopeMetrics = nil + } +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/traces.go b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/traces.go new file mode 100644 index 000000000..627881fc3 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/otlp/traces.go @@ -0,0 +1,19 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package otlp // import "go.opentelemetry.io/collector/pdata/internal/otlp" + +import ( + otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" +) + +// MigrateTraces implements any translation needed due to deprecation in OTLP traces protocol. +// Any ptrace.Unmarshaler implementation from OTLP (proto/json) MUST call this, and the gRPC Server implementation. 
+func MigrateTraces(rss []*otlptrace.ResourceSpans) { + for _, rs := range rss { + if len(rs.ScopeSpans) == 0 { + rs.ScopeSpans = rs.DeprecatedScopeSpans + } + rs.DeprecatedScopeSpans = nil + } +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/state.go b/vendor/go.opentelemetry.io/collector/pdata/internal/state.go new file mode 100644 index 000000000..f10de5ead --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/state.go @@ -0,0 +1,22 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/pdata/internal" + +// State defines an ownership state of pmetric.Metrics, plog.Logs or ptrace.Traces. +type State int32 + +const ( + // StateMutable indicates that the data is exclusive to the current consumer. + StateMutable State = iota + + // StateReadOnly indicates that the data is shared with other consumers. + StateReadOnly +) + +// AssertMutable panics if the state is not StateMutable. +func (state *State) AssertMutable() { + if *state != StateMutable { + panic("invalid access to shared data") + } +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_logs.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_logs.go new file mode 100644 index 000000000..6b6c076cc --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_logs.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/pdata/internal" + +import ( + otlpcollectorlog "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1" + otlplogs "go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1" +) + +type Logs struct { + orig *otlpcollectorlog.ExportLogsServiceRequest + state *State +} + +func GetOrigLogs(ms Logs) *otlpcollectorlog.ExportLogsServiceRequest { + return ms.orig +} + +func GetLogsState(ms Logs) *State { + return ms.state +} + +func SetLogsState(ms Logs, state State) { + *ms.state = state +} + +func NewLogs(orig *otlpcollectorlog.ExportLogsServiceRequest, state *State) Logs { + return Logs{orig: orig, state: state} +} + +// LogsToProto internal helper to convert Logs to protobuf representation. +func LogsToProto(l Logs) otlplogs.LogsData { + return otlplogs.LogsData{ + ResourceLogs: l.orig.ResourceLogs, + } +} + +// LogsFromProto internal helper to convert protobuf representation to Logs. +// This function set exclusive state assuming that it's called only once per Logs. 
+func LogsFromProto(orig otlplogs.LogsData) Logs { + state := StateMutable + return NewLogs(&otlpcollectorlog.ExportLogsServiceRequest{ + ResourceLogs: orig.ResourceLogs, + }, &state) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_map.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_map.go new file mode 100644 index 000000000..23a1c9056 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_map.go @@ -0,0 +1,38 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/pdata/internal" + +import ( + otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" +) + +type Map struct { + orig *[]otlpcommon.KeyValue + state *State +} + +func GetOrigMap(ms Map) *[]otlpcommon.KeyValue { + return ms.orig +} + +func GetMapState(ms Map) *State { + return ms.state +} + +func NewMap(orig *[]otlpcommon.KeyValue, state *State) Map { + return Map{orig: orig, state: state} +} + +func GenerateTestMap() Map { + var orig []otlpcommon.KeyValue + state := StateMutable + ms := NewMap(&orig, &state) + FillTestMap(ms) + return ms +} + +func FillTestMap(dest Map) { + *dest.orig = nil + *dest.orig = append(*dest.orig, otlpcommon.KeyValue{Key: "k", Value: otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: "v"}}}) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_metrics.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_metrics.go new file mode 100644 index 000000000..85be497ea --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_metrics.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/pdata/internal" + +import ( + otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" +) + +type Metrics struct { + orig *otlpcollectormetrics.ExportMetricsServiceRequest + state *State +} + +func GetOrigMetrics(ms Metrics) *otlpcollectormetrics.ExportMetricsServiceRequest { + return ms.orig +} + +func GetMetricsState(ms Metrics) *State { + return ms.state +} + +func SetMetricsState(ms Metrics, state State) { + *ms.state = state +} + +func NewMetrics(orig *otlpcollectormetrics.ExportMetricsServiceRequest, state *State) Metrics { + return Metrics{orig: orig, state: state} +} + +// MetricsToProto internal helper to convert Metrics to protobuf representation. +func MetricsToProto(l Metrics) otlpmetrics.MetricsData { + return otlpmetrics.MetricsData{ + ResourceMetrics: l.orig.ResourceMetrics, + } +} + +// MetricsFromProto internal helper to convert protobuf representation to Metrics. +// This function set exclusive state assuming that it's called only once per Metrics. 
+func MetricsFromProto(orig otlpmetrics.MetricsData) Metrics { + state := StateMutable + return NewMetrics(&otlpcollectormetrics.ExportMetricsServiceRequest{ + ResourceMetrics: orig.ResourceMetrics, + }, &state) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_slice.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_slice.go new file mode 100644 index 000000000..2f366199a --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_slice.go @@ -0,0 +1,41 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/pdata/internal" + +import ( + otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" +) + +type Slice struct { + orig *[]otlpcommon.AnyValue + state *State +} + +func GetOrigSlice(ms Slice) *[]otlpcommon.AnyValue { + return ms.orig +} + +func GetSliceState(ms Slice) *State { + return ms.state +} + +func NewSlice(orig *[]otlpcommon.AnyValue, state *State) Slice { + return Slice{orig: orig, state: state} +} + +func GenerateTestSlice() Slice { + orig := []otlpcommon.AnyValue{} + state := StateMutable + tv := NewSlice(&orig, &state) + FillTestSlice(tv) + return tv +} + +func FillTestSlice(tv Slice) { + *tv.orig = make([]otlpcommon.AnyValue, 7) + for i := 0; i < 7; i++ { + state := StateMutable + FillTestValue(NewValue(&(*tv.orig)[i], &state)) + } +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_traces.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_traces.go new file mode 100644 index 000000000..5a4cdadbd --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_traces.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/pdata/internal" + +import ( + otlpcollectortrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1" + otlptrace "go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1" +) + +type Traces struct { + orig *otlpcollectortrace.ExportTraceServiceRequest + state *State +} + +func GetOrigTraces(ms Traces) *otlpcollectortrace.ExportTraceServiceRequest { + return ms.orig +} + +func GetTracesState(ms Traces) *State { + return ms.state +} + +func SetTracesState(ms Traces, state State) { + *ms.state = state +} + +func NewTraces(orig *otlpcollectortrace.ExportTraceServiceRequest, state *State) Traces { + return Traces{orig: orig, state: state} +} + +// TracesToProto internal helper to convert Traces to protobuf representation. +func TracesToProto(l Traces) otlptrace.TracesData { + return otlptrace.TracesData{ + ResourceSpans: l.orig.ResourceSpans, + } +} + +// TracesFromProto internal helper to convert protobuf representation to Traces. +// This function set exclusive state assuming that it's called only once per Traces. 
+func TracesFromProto(orig otlptrace.TracesData) Traces { + state := StateMutable + return NewTraces(&otlpcollectortrace.ExportTraceServiceRequest{ + ResourceSpans: orig.ResourceSpans, + }, &state) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_tracestate.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_tracestate.go new file mode 100644 index 000000000..1a2f79f89 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_tracestate.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/pdata/internal" + +type TraceState struct { + orig *string + state *State +} + +func GetOrigTraceState(ms TraceState) *string { + return ms.orig +} + +func GetTraceStateState(ms TraceState) *State { + return ms.state +} + +func NewTraceState(orig *string, state *State) TraceState { + return TraceState{orig: orig, state: state} +} + +func GenerateTestTraceState() TraceState { + var orig string + state := StateMutable + ms := NewTraceState(&orig, &state) + FillTestTraceState(ms) + return ms +} + +func FillTestTraceState(dest TraceState) { + *dest.orig = "rojo=00f067aa0ba902b7" +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_value.go b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_value.go new file mode 100644 index 000000000..0d5225052 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/internal/wrapper_value.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package internal // import "go.opentelemetry.io/collector/pdata/internal" + +import ( + otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" +) + +type Value struct { + orig *otlpcommon.AnyValue + state *State +} + +func GetOrigValue(ms Value) *otlpcommon.AnyValue { + return ms.orig +} + +func GetValueState(ms Value) *State { + return ms.state +} + +func NewValue(orig *otlpcommon.AnyValue, state *State) Value { + return Value{orig: orig, state: state} +} + +func FillTestValue(dest Value) { + dest.orig.Value = &otlpcommon.AnyValue_StringValue{StringValue: "v"} +} + +func GenerateTestValue() Value { + var orig otlpcommon.AnyValue + state := StateMutable + ms := NewValue(&orig, &state) + FillTestValue(ms) + return ms +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_byteslice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_byteslice.go new file mode 100644 index 000000000..cbb64987d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_byteslice.go @@ -0,0 +1,108 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pcommon + +import ( + "go.opentelemetry.io/collector/pdata/internal" +) + +// ByteSlice represents a []byte slice. +// The instance of ByteSlice can be assigned to multiple objects since it's immutable. +// +// Must use NewByteSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. 
+type ByteSlice internal.ByteSlice + +func (ms ByteSlice) getOrig() *[]byte { + return internal.GetOrigByteSlice(internal.ByteSlice(ms)) +} + +func (ms ByteSlice) getState() *internal.State { + return internal.GetByteSliceState(internal.ByteSlice(ms)) +} + +// NewByteSlice creates a new empty ByteSlice. +func NewByteSlice() ByteSlice { + orig := []byte(nil) + state := internal.StateMutable + return ByteSlice(internal.NewByteSlice(&orig, &state)) +} + +// AsRaw returns a copy of the []byte slice. +func (ms ByteSlice) AsRaw() []byte { + return copyByteSlice(nil, *ms.getOrig()) +} + +// FromRaw copies raw []byte into the slice ByteSlice. +func (ms ByteSlice) FromRaw(val []byte) { + ms.getState().AssertMutable() + *ms.getOrig() = copyByteSlice(*ms.getOrig(), val) +} + +// Len returns length of the []byte slice value. +// Equivalent of len(byteSlice). +func (ms ByteSlice) Len() int { + return len(*ms.getOrig()) +} + +// At returns an item from particular index. +// Equivalent of byteSlice[i]. +func (ms ByteSlice) At(i int) byte { + return (*ms.getOrig())[i] +} + +// SetAt sets byte item at particular index. +// Equivalent of byteSlice[i] = val +func (ms ByteSlice) SetAt(i int, val byte) { + ms.getState().AssertMutable() + (*ms.getOrig())[i] = val +} + +// EnsureCapacity ensures ByteSlice has at least the specified capacity. +// 1. If the newCap <= cap, then is no change in capacity. +// 2. If the newCap > cap, then the slice capacity will be expanded to the provided value which will be equivalent of: +// buf := make([]byte, len(byteSlice), newCap) +// copy(buf, byteSlice) +// byteSlice = buf +func (ms ByteSlice) EnsureCapacity(newCap int) { + ms.getState().AssertMutable() + oldCap := cap(*ms.getOrig()) + if newCap <= oldCap { + return + } + + newOrig := make([]byte, len(*ms.getOrig()), newCap) + copy(newOrig, *ms.getOrig()) + *ms.getOrig() = newOrig +} + +// Append appends extra elements to ByteSlice. +// Equivalent of byteSlice = append(byteSlice, elms...) +func (ms ByteSlice) Append(elms ...byte) { + ms.getState().AssertMutable() + *ms.getOrig() = append(*ms.getOrig(), elms...) +} + +// MoveTo moves all elements from the current slice overriding the destination and +// resetting the current instance to its zero value. +func (ms ByteSlice) MoveTo(dest ByteSlice) { + ms.getState().AssertMutable() + dest.getState().AssertMutable() + *dest.getOrig() = *ms.getOrig() + *ms.getOrig() = nil +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (ms ByteSlice) CopyTo(dest ByteSlice) { + dest.getState().AssertMutable() + *dest.getOrig() = copyByteSlice(*dest.getOrig(), *ms.getOrig()) +} + +func copyByteSlice(dst, src []byte) []byte { + dst = dst[:0] + return append(dst, src...) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_float64slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_float64slice.go new file mode 100644 index 000000000..83a07ccf4 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_float64slice.go @@ -0,0 +1,108 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pcommon + +import ( + "go.opentelemetry.io/collector/pdata/internal" +) + +// Float64Slice represents a []float64 slice. +// The instance of Float64Slice can be assigned to multiple objects since it's immutable. 
+// +// Must use NewFloat64Slice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Float64Slice internal.Float64Slice + +func (ms Float64Slice) getOrig() *[]float64 { + return internal.GetOrigFloat64Slice(internal.Float64Slice(ms)) +} + +func (ms Float64Slice) getState() *internal.State { + return internal.GetFloat64SliceState(internal.Float64Slice(ms)) +} + +// NewFloat64Slice creates a new empty Float64Slice. +func NewFloat64Slice() Float64Slice { + orig := []float64(nil) + state := internal.StateMutable + return Float64Slice(internal.NewFloat64Slice(&orig, &state)) +} + +// AsRaw returns a copy of the []float64 slice. +func (ms Float64Slice) AsRaw() []float64 { + return copyFloat64Slice(nil, *ms.getOrig()) +} + +// FromRaw copies raw []float64 into the slice Float64Slice. +func (ms Float64Slice) FromRaw(val []float64) { + ms.getState().AssertMutable() + *ms.getOrig() = copyFloat64Slice(*ms.getOrig(), val) +} + +// Len returns length of the []float64 slice value. +// Equivalent of len(float64Slice). +func (ms Float64Slice) Len() int { + return len(*ms.getOrig()) +} + +// At returns an item from particular index. +// Equivalent of float64Slice[i]. +func (ms Float64Slice) At(i int) float64 { + return (*ms.getOrig())[i] +} + +// SetAt sets float64 item at particular index. +// Equivalent of float64Slice[i] = val +func (ms Float64Slice) SetAt(i int, val float64) { + ms.getState().AssertMutable() + (*ms.getOrig())[i] = val +} + +// EnsureCapacity ensures Float64Slice has at least the specified capacity. +// 1. If the newCap <= cap, then is no change in capacity. +// 2. If the newCap > cap, then the slice capacity will be expanded to the provided value which will be equivalent of: +// buf := make([]float64, len(float64Slice), newCap) +// copy(buf, float64Slice) +// float64Slice = buf +func (ms Float64Slice) EnsureCapacity(newCap int) { + ms.getState().AssertMutable() + oldCap := cap(*ms.getOrig()) + if newCap <= oldCap { + return + } + + newOrig := make([]float64, len(*ms.getOrig()), newCap) + copy(newOrig, *ms.getOrig()) + *ms.getOrig() = newOrig +} + +// Append appends extra elements to Float64Slice. +// Equivalent of float64Slice = append(float64Slice, elms...) +func (ms Float64Slice) Append(elms ...float64) { + ms.getState().AssertMutable() + *ms.getOrig() = append(*ms.getOrig(), elms...) +} + +// MoveTo moves all elements from the current slice overriding the destination and +// resetting the current instance to its zero value. +func (ms Float64Slice) MoveTo(dest Float64Slice) { + ms.getState().AssertMutable() + dest.getState().AssertMutable() + *dest.getOrig() = *ms.getOrig() + *ms.getOrig() = nil +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (ms Float64Slice) CopyTo(dest Float64Slice) { + dest.getState().AssertMutable() + *dest.getOrig() = copyFloat64Slice(*dest.getOrig(), *ms.getOrig()) +} + +func copyFloat64Slice(dst, src []float64) []float64 { + dst = dst[:0] + return append(dst, src...) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_instrumentationscope.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_instrumentationscope.go new file mode 100644 index 000000000..8a5d23b54 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_instrumentationscope.go @@ -0,0 +1,98 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". 
DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pcommon + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" +) + +// InstrumentationScope is a message representing the instrumentation scope information. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewInstrumentationScope function to create new instances. +// Important: zero-initialized instance is not valid for use. +type InstrumentationScope internal.InstrumentationScope + +func newInstrumentationScope(orig *otlpcommon.InstrumentationScope, state *internal.State) InstrumentationScope { + return InstrumentationScope(internal.NewInstrumentationScope(orig, state)) +} + +// NewInstrumentationScope creates a new empty InstrumentationScope. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewInstrumentationScope() InstrumentationScope { + state := internal.StateMutable + return newInstrumentationScope(&otlpcommon.InstrumentationScope{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms InstrumentationScope) MoveTo(dest InstrumentationScope) { + ms.getState().AssertMutable() + dest.getState().AssertMutable() + *dest.getOrig() = *ms.getOrig() + *ms.getOrig() = otlpcommon.InstrumentationScope{} +} + +func (ms InstrumentationScope) getOrig() *otlpcommon.InstrumentationScope { + return internal.GetOrigInstrumentationScope(internal.InstrumentationScope(ms)) +} + +func (ms InstrumentationScope) getState() *internal.State { + return internal.GetInstrumentationScopeState(internal.InstrumentationScope(ms)) +} + +// Name returns the name associated with this InstrumentationScope. +func (ms InstrumentationScope) Name() string { + return ms.getOrig().Name +} + +// SetName replaces the name associated with this InstrumentationScope. +func (ms InstrumentationScope) SetName(v string) { + ms.getState().AssertMutable() + ms.getOrig().Name = v +} + +// Version returns the version associated with this InstrumentationScope. +func (ms InstrumentationScope) Version() string { + return ms.getOrig().Version +} + +// SetVersion replaces the version associated with this InstrumentationScope. +func (ms InstrumentationScope) SetVersion(v string) { + ms.getState().AssertMutable() + ms.getOrig().Version = v +} + +// Attributes returns the Attributes associated with this InstrumentationScope. +func (ms InstrumentationScope) Attributes() Map { + return Map(internal.NewMap(&ms.getOrig().Attributes, internal.GetInstrumentationScopeState(internal.InstrumentationScope(ms)))) +} + +// DroppedAttributesCount returns the droppedattributescount associated with this InstrumentationScope. +func (ms InstrumentationScope) DroppedAttributesCount() uint32 { + return ms.getOrig().DroppedAttributesCount +} + +// SetDroppedAttributesCount replaces the droppedattributescount associated with this InstrumentationScope. +func (ms InstrumentationScope) SetDroppedAttributesCount(v uint32) { + ms.getState().AssertMutable() + ms.getOrig().DroppedAttributesCount = v +} + +// CopyTo copies all properties from the current struct overriding the destination. 
+func (ms InstrumentationScope) CopyTo(dest InstrumentationScope) { + dest.getState().AssertMutable() + dest.SetName(ms.Name()) + dest.SetVersion(ms.Version()) + ms.Attributes().CopyTo(dest.Attributes()) + dest.SetDroppedAttributesCount(ms.DroppedAttributesCount()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_resource.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_resource.go new file mode 100644 index 000000000..12e6cfa7f --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_resource.go @@ -0,0 +1,74 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pcommon + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpresource "go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1" +) + +// Resource is a message representing the resource information. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewResource function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Resource internal.Resource + +func newResource(orig *otlpresource.Resource, state *internal.State) Resource { + return Resource(internal.NewResource(orig, state)) +} + +// NewResource creates a new empty Resource. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewResource() Resource { + state := internal.StateMutable + return newResource(&otlpresource.Resource{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Resource) MoveTo(dest Resource) { + ms.getState().AssertMutable() + dest.getState().AssertMutable() + *dest.getOrig() = *ms.getOrig() + *ms.getOrig() = otlpresource.Resource{} +} + +func (ms Resource) getOrig() *otlpresource.Resource { + return internal.GetOrigResource(internal.Resource(ms)) +} + +func (ms Resource) getState() *internal.State { + return internal.GetResourceState(internal.Resource(ms)) +} + +// Attributes returns the Attributes associated with this Resource. +func (ms Resource) Attributes() Map { + return Map(internal.NewMap(&ms.getOrig().Attributes, internal.GetResourceState(internal.Resource(ms)))) +} + +// DroppedAttributesCount returns the droppedattributescount associated with this Resource. +func (ms Resource) DroppedAttributesCount() uint32 { + return ms.getOrig().DroppedAttributesCount +} + +// SetDroppedAttributesCount replaces the droppedattributescount associated with this Resource. +func (ms Resource) SetDroppedAttributesCount(v uint32) { + ms.getState().AssertMutable() + ms.getOrig().DroppedAttributesCount = v +} + +// CopyTo copies all properties from the current struct overriding the destination. 
+func (ms Resource) CopyTo(dest Resource) { + dest.getState().AssertMutable() + ms.Attributes().CopyTo(dest.Attributes()) + dest.SetDroppedAttributesCount(ms.DroppedAttributesCount()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_uint64slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_uint64slice.go new file mode 100644 index 000000000..1344ca35b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/generated_uint64slice.go @@ -0,0 +1,108 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pcommon + +import ( + "go.opentelemetry.io/collector/pdata/internal" +) + +// UInt64Slice represents a []uint64 slice. +// The instance of UInt64Slice can be assigned to multiple objects since it's immutable. +// +// Must use NewUInt64Slice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type UInt64Slice internal.UInt64Slice + +func (ms UInt64Slice) getOrig() *[]uint64 { + return internal.GetOrigUInt64Slice(internal.UInt64Slice(ms)) +} + +func (ms UInt64Slice) getState() *internal.State { + return internal.GetUInt64SliceState(internal.UInt64Slice(ms)) +} + +// NewUInt64Slice creates a new empty UInt64Slice. +func NewUInt64Slice() UInt64Slice { + orig := []uint64(nil) + state := internal.StateMutable + return UInt64Slice(internal.NewUInt64Slice(&orig, &state)) +} + +// AsRaw returns a copy of the []uint64 slice. +func (ms UInt64Slice) AsRaw() []uint64 { + return copyUInt64Slice(nil, *ms.getOrig()) +} + +// FromRaw copies raw []uint64 into the slice UInt64Slice. +func (ms UInt64Slice) FromRaw(val []uint64) { + ms.getState().AssertMutable() + *ms.getOrig() = copyUInt64Slice(*ms.getOrig(), val) +} + +// Len returns length of the []uint64 slice value. +// Equivalent of len(uInt64Slice). +func (ms UInt64Slice) Len() int { + return len(*ms.getOrig()) +} + +// At returns an item from particular index. +// Equivalent of uInt64Slice[i]. +func (ms UInt64Slice) At(i int) uint64 { + return (*ms.getOrig())[i] +} + +// SetAt sets uint64 item at particular index. +// Equivalent of uInt64Slice[i] = val +func (ms UInt64Slice) SetAt(i int, val uint64) { + ms.getState().AssertMutable() + (*ms.getOrig())[i] = val +} + +// EnsureCapacity ensures UInt64Slice has at least the specified capacity. +// 1. If the newCap <= cap, then is no change in capacity. +// 2. If the newCap > cap, then the slice capacity will be expanded to the provided value which will be equivalent of: +// buf := make([]uint64, len(uInt64Slice), newCap) +// copy(buf, uInt64Slice) +// uInt64Slice = buf +func (ms UInt64Slice) EnsureCapacity(newCap int) { + ms.getState().AssertMutable() + oldCap := cap(*ms.getOrig()) + if newCap <= oldCap { + return + } + + newOrig := make([]uint64, len(*ms.getOrig()), newCap) + copy(newOrig, *ms.getOrig()) + *ms.getOrig() = newOrig +} + +// Append appends extra elements to UInt64Slice. +// Equivalent of uInt64Slice = append(uInt64Slice, elms...) +func (ms UInt64Slice) Append(elms ...uint64) { + ms.getState().AssertMutable() + *ms.getOrig() = append(*ms.getOrig(), elms...) +} + +// MoveTo moves all elements from the current slice overriding the destination and +// resetting the current instance to its zero value. 
+func (ms UInt64Slice) MoveTo(dest UInt64Slice) { + ms.getState().AssertMutable() + dest.getState().AssertMutable() + *dest.getOrig() = *ms.getOrig() + *ms.getOrig() = nil +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (ms UInt64Slice) CopyTo(dest UInt64Slice) { + dest.getState().AssertMutable() + *dest.getOrig() = copyUInt64Slice(*dest.getOrig(), *ms.getOrig()) +} + +func copyUInt64Slice(dst, src []uint64) []uint64 { + dst = dst[:0] + return append(dst, src...) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go new file mode 100644 index 000000000..c41aa9175 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/map.go @@ -0,0 +1,280 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon" + +import ( + "go.uber.org/multierr" + + "go.opentelemetry.io/collector/pdata/internal" + otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" +) + +// Map stores a map of string keys to elements of Value type. +type Map internal.Map + +// NewMap creates a Map with 0 elements. +func NewMap() Map { + orig := []otlpcommon.KeyValue(nil) + state := internal.StateMutable + return Map(internal.NewMap(&orig, &state)) +} + +func (m Map) getOrig() *[]otlpcommon.KeyValue { + return internal.GetOrigMap(internal.Map(m)) +} + +func (m Map) getState() *internal.State { + return internal.GetMapState(internal.Map(m)) +} + +func newMap(orig *[]otlpcommon.KeyValue, state *internal.State) Map { + return Map(internal.NewMap(orig, state)) +} + +// Clear erases any existing entries in this Map instance. +func (m Map) Clear() { + m.getState().AssertMutable() + *m.getOrig() = nil +} + +// EnsureCapacity increases the capacity of this Map instance, if necessary, +// to ensure that it can hold at least the number of elements specified by the capacity argument. +func (m Map) EnsureCapacity(capacity int) { + m.getState().AssertMutable() + oldOrig := *m.getOrig() + if capacity <= cap(oldOrig) { + return + } + *m.getOrig() = make([]otlpcommon.KeyValue, len(oldOrig), capacity) + copy(*m.getOrig(), oldOrig) +} + +// Get returns the Value associated with the key and true. Returned +// Value is not a copy, it is a reference to the value stored in this map. +// It is allowed to modify the returned value using Value.Set* functions. +// Such modification will be applied to the value stored in this map. +// +// If the key does not exist returns an invalid instance of the KeyValue and false. +// Calling any functions on the returned invalid instance will cause a panic. +func (m Map) Get(key string) (Value, bool) { + for i := range *m.getOrig() { + akv := &(*m.getOrig())[i] + if akv.Key == key { + return newValue(&akv.Value, m.getState()), true + } + } + return newValue(nil, m.getState()), false +} + +// Remove removes the entry associated with the key and returns true if the key +// was present in the map, otherwise returns false. 
+func (m Map) Remove(key string) bool { + m.getState().AssertMutable() + for i := range *m.getOrig() { + akv := &(*m.getOrig())[i] + if akv.Key == key { + *akv = (*m.getOrig())[len(*m.getOrig())-1] + *m.getOrig() = (*m.getOrig())[:len(*m.getOrig())-1] + return true + } + } + return false +} + +// RemoveIf removes the entries for which the function in question returns true +func (m Map) RemoveIf(f func(string, Value) bool) { + m.getState().AssertMutable() + newLen := 0 + for i := 0; i < len(*m.getOrig()); i++ { + akv := &(*m.getOrig())[i] + if f(akv.Key, newValue(&akv.Value, m.getState())) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*m.getOrig())[newLen] = (*m.getOrig())[i] + newLen++ + } + *m.getOrig() = (*m.getOrig())[:newLen] +} + +// PutEmpty inserts or updates an empty value to the map under given key +// and return the updated/inserted value. +func (m Map) PutEmpty(k string) Value { + m.getState().AssertMutable() + if av, existing := m.Get(k); existing { + av.getOrig().Value = nil + return newValue(av.getOrig(), m.getState()) + } + *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k}) + return newValue(&(*m.getOrig())[len(*m.getOrig())-1].Value, m.getState()) +} + +// PutStr performs the Insert or Update action. The Value is +// inserted to the map that did not originally have the key. The key/value is +// updated to the map where the key already existed. +func (m Map) PutStr(k string, v string) { + m.getState().AssertMutable() + if av, existing := m.Get(k); existing { + av.SetStr(v) + } else { + *m.getOrig() = append(*m.getOrig(), newKeyValueString(k, v)) + } +} + +// PutInt performs the Insert or Update action. The int Value is +// inserted to the map that did not originally have the key. The key/value is +// updated to the map where the key already existed. +func (m Map) PutInt(k string, v int64) { + m.getState().AssertMutable() + if av, existing := m.Get(k); existing { + av.SetInt(v) + } else { + *m.getOrig() = append(*m.getOrig(), newKeyValueInt(k, v)) + } +} + +// PutDouble performs the Insert or Update action. The double Value is +// inserted to the map that did not originally have the key. The key/value is +// updated to the map where the key already existed. +func (m Map) PutDouble(k string, v float64) { + m.getState().AssertMutable() + if av, existing := m.Get(k); existing { + av.SetDouble(v) + } else { + *m.getOrig() = append(*m.getOrig(), newKeyValueDouble(k, v)) + } +} + +// PutBool performs the Insert or Update action. The bool Value is +// inserted to the map that did not originally have the key. The key/value is +// updated to the map where the key already existed. +func (m Map) PutBool(k string, v bool) { + m.getState().AssertMutable() + if av, existing := m.Get(k); existing { + av.SetBool(v) + } else { + *m.getOrig() = append(*m.getOrig(), newKeyValueBool(k, v)) + } +} + +// PutEmptyBytes inserts or updates an empty byte slice under given key and returns it. +func (m Map) PutEmptyBytes(k string) ByteSlice { + m.getState().AssertMutable() + bv := otlpcommon.AnyValue_BytesValue{} + if av, existing := m.Get(k); existing { + av.getOrig().Value = &bv + } else { + *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: &bv}}) + } + return ByteSlice(internal.NewByteSlice(&bv.BytesValue, m.getState())) +} + +// PutEmptyMap inserts or updates an empty map under given key and returns it. 
+func (m Map) PutEmptyMap(k string) Map { + m.getState().AssertMutable() + kvl := otlpcommon.AnyValue_KvlistValue{KvlistValue: &otlpcommon.KeyValueList{Values: []otlpcommon.KeyValue(nil)}} + if av, existing := m.Get(k); existing { + av.getOrig().Value = &kvl + } else { + *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: &kvl}}) + } + return Map(internal.NewMap(&kvl.KvlistValue.Values, m.getState())) +} + +// PutEmptySlice inserts or updates an empty slice under given key and returns it. +func (m Map) PutEmptySlice(k string) Slice { + m.getState().AssertMutable() + vl := otlpcommon.AnyValue_ArrayValue{ArrayValue: &otlpcommon.ArrayValue{Values: []otlpcommon.AnyValue(nil)}} + if av, existing := m.Get(k); existing { + av.getOrig().Value = &vl + } else { + *m.getOrig() = append(*m.getOrig(), otlpcommon.KeyValue{Key: k, Value: otlpcommon.AnyValue{Value: &vl}}) + } + return Slice(internal.NewSlice(&vl.ArrayValue.Values, m.getState())) +} + +// Len returns the length of this map. +// +// Because the Map is represented internally by a slice of pointers, and the data are comping from the wire, +// it is possible that when iterating using "Range" to get access to fewer elements because nil elements are skipped. +func (m Map) Len() int { + return len(*m.getOrig()) +} + +// Range calls f sequentially for each key and value present in the map. If f returns false, range stops the iteration. +// +// Example: +// +// sm.Range(func(k string, v Value) bool { +// ... +// }) +func (m Map) Range(f func(k string, v Value) bool) { + for i := range *m.getOrig() { + kv := &(*m.getOrig())[i] + if !f(kv.Key, Value(internal.NewValue(&kv.Value, m.getState()))) { + break + } + } +} + +// CopyTo copies all elements from the current map overriding the destination. +func (m Map) CopyTo(dest Map) { + dest.getState().AssertMutable() + newLen := len(*m.getOrig()) + oldCap := cap(*dest.getOrig()) + if newLen <= oldCap { + // New slice fits in existing slice, no need to reallocate. + *dest.getOrig() = (*dest.getOrig())[:newLen:oldCap] + for i := range *m.getOrig() { + akv := &(*m.getOrig())[i] + destAkv := &(*dest.getOrig())[i] + destAkv.Key = akv.Key + newValue(&akv.Value, m.getState()).CopyTo(newValue(&destAkv.Value, dest.getState())) + } + return + } + + // New slice is bigger than exist slice. Allocate new space. + origs := make([]otlpcommon.KeyValue, len(*m.getOrig())) + for i := range *m.getOrig() { + akv := &(*m.getOrig())[i] + origs[i].Key = akv.Key + newValue(&akv.Value, m.getState()).CopyTo(newValue(&origs[i].Value, dest.getState())) + } + *dest.getOrig() = origs +} + +// AsRaw returns a standard go map representation of this Map. +func (m Map) AsRaw() map[string]any { + rawMap := make(map[string]any) + m.Range(func(k string, v Value) bool { + rawMap[k] = v.AsRaw() + return true + }) + return rawMap +} + +// FromRaw overrides this Map instance from a standard go map. 
+func (m Map) FromRaw(rawMap map[string]any) error { + m.getState().AssertMutable() + if len(rawMap) == 0 { + *m.getOrig() = nil + return nil + } + + var errs error + origs := make([]otlpcommon.KeyValue, len(rawMap)) + ix := 0 + for k, iv := range rawMap { + origs[ix].Key = k + errs = multierr.Append(errs, newValue(&origs[ix].Value, m.getState()).FromRaw(iv)) + ix++ + } + *m.getOrig() = origs + return errs +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go new file mode 100644 index 000000000..5352ff44d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/slice.go @@ -0,0 +1,167 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon" + +import ( + "go.uber.org/multierr" + + "go.opentelemetry.io/collector/pdata/internal" + otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" +) + +// Slice logically represents a slice of Value. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Slice internal.Slice + +func newSlice(orig *[]otlpcommon.AnyValue, state *internal.State) Slice { + return Slice(internal.NewSlice(orig, state)) +} + +func (es Slice) getOrig() *[]otlpcommon.AnyValue { + return internal.GetOrigSlice(internal.Slice(es)) +} + +func (es Slice) getState() *internal.State { + return internal.GetSliceState(internal.Slice(es)) +} + +// NewSlice creates a Slice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewSlice() Slice { + orig := []otlpcommon.AnyValue(nil) + state := internal.StateMutable + return Slice(internal.NewSlice(&orig, &state)) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewSlice()". +func (es Slice) Len() int { + return len(*es.getOrig()) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es Slice) At(ix int) Value { + return newValue(&(*es.getOrig())[ix], es.getState()) +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es Slice) CopyTo(dest Slice) { + dest.getState().AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.getOrig()) + if srcLen <= destCap { + (*dest.getOrig()) = (*dest.getOrig())[:srcLen:destCap] + } else { + (*dest.getOrig()) = make([]otlpcommon.AnyValue, srcLen) + } + + for i := range *es.getOrig() { + newValue(&(*es.getOrig())[i], es.getState()).CopyTo(newValue(&(*dest.getOrig())[i], dest.getState())) + } +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new Slice can be initialized: +// +// es := NewSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. 
+// } +func (es Slice) EnsureCapacity(newCap int) { + es.getState().AssertMutable() + oldCap := cap(*es.getOrig()) + if newCap <= oldCap { + return + } + + newOrig := make([]otlpcommon.AnyValue, len(*es.getOrig()), newCap) + copy(newOrig, *es.getOrig()) + *es.getOrig() = newOrig +} + +// AppendEmpty will append to the end of the slice an empty Value. +// It returns the newly added Value. +func (es Slice) AppendEmpty() Value { + es.getState().AssertMutable() + *es.getOrig() = append(*es.getOrig(), otlpcommon.AnyValue{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es Slice) MoveAndAppendTo(dest Slice) { + es.getState().AssertMutable() + dest.getState().AssertMutable() + if *dest.getOrig() == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.getOrig() = *es.getOrig() + } else { + *dest.getOrig() = append(*dest.getOrig(), *es.getOrig()...) + } + *es.getOrig() = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es Slice) RemoveIf(f func(Value) bool) { + es.getState().AssertMutable() + newLen := 0 + for i := 0; i < len(*es.getOrig()); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.getOrig())[newLen] = (*es.getOrig())[i] + newLen++ + } + // TODO: Prevent memory leak by erasing truncated values. + *es.getOrig() = (*es.getOrig())[:newLen] +} + +// AsRaw return []any copy of the Slice. +func (es Slice) AsRaw() []any { + rawSlice := make([]any, 0, es.Len()) + for i := 0; i < es.Len(); i++ { + rawSlice = append(rawSlice, es.At(i).AsRaw()) + } + return rawSlice +} + +// FromRaw copies []any into the Slice. +func (es Slice) FromRaw(rawSlice []any) error { + es.getState().AssertMutable() + if len(rawSlice) == 0 { + *es.getOrig() = nil + return nil + } + var errs error + origs := make([]otlpcommon.AnyValue, len(rawSlice)) + for ix, iv := range rawSlice { + errs = multierr.Append(errs, newValue(&origs[ix], es.getState()).FromRaw(iv)) + } + *es.getOrig() = origs + return errs +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/spanid.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/spanid.go new file mode 100644 index 000000000..63399cb58 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/spanid.go @@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon" +import ( + "encoding/hex" + + "go.opentelemetry.io/collector/pdata/internal/data" +) + +var emptySpanID = SpanID([8]byte{}) + +// SpanID is span identifier. +type SpanID [8]byte + +// NewSpanIDEmpty returns a new empty (all zero bytes) SpanID. +func NewSpanIDEmpty() SpanID { + return emptySpanID +} + +// String returns string representation of the SpanID. +// +// Important: Don't rely on this method to get a string identifier of SpanID, +// Use hex.EncodeToString explicitly instead. +// This method meant to implement Stringer interface for display purposes only. +func (ms SpanID) String() string { + if ms.IsEmpty() { + return "" + } + return hex.EncodeToString(ms[:]) +} + +// IsEmpty returns true if id doesn't contain at least one non-zero byte. 
+func (ms SpanID) IsEmpty() bool { + return data.SpanID(ms).IsEmpty() +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go new file mode 100644 index 000000000..5fd1758b1 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon" + +import ( + "time" +) + +// Timestamp is a time specified as UNIX Epoch time in nanoseconds since +// 1970-01-01 00:00:00 +0000 UTC. +type Timestamp uint64 + +// NewTimestampFromTime constructs a new Timestamp from the provided time.Time. +func NewTimestampFromTime(t time.Time) Timestamp { + return Timestamp(uint64(t.UnixNano())) +} + +// AsTime converts this to a time.Time. +func (ts Timestamp) AsTime() time.Time { + return time.Unix(0, int64(ts)).UTC() +} + +// String returns the string representation of this in UTC. +func (ts Timestamp) String() string { + return ts.AsTime().String() +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/trace_state.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/trace_state.go new file mode 100644 index 000000000..dd163629c --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/trace_state.go @@ -0,0 +1,50 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon" + +import ( + "go.opentelemetry.io/collector/pdata/internal" +) + +// TraceState represents the trace state from the w3c-trace-context. +type TraceState internal.TraceState + +func NewTraceState() TraceState { + state := internal.StateMutable + return TraceState(internal.NewTraceState(new(string), &state)) +} + +func (ms TraceState) getOrig() *string { + return internal.GetOrigTraceState(internal.TraceState(ms)) +} + +func (ms TraceState) getState() *internal.State { + return internal.GetTraceStateState(internal.TraceState(ms)) +} + +// AsRaw returns the string representation of the tracestate in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header +func (ms TraceState) AsRaw() string { + return *ms.getOrig() +} + +// FromRaw copies the string representation in w3c-trace-context format of the tracestate into this TraceState. +func (ms TraceState) FromRaw(v string) { + ms.getState().AssertMutable() + *ms.getOrig() = v +} + +// MoveTo moves the TraceState instance overriding the destination +// and resetting the current instance to its zero value. +func (ms TraceState) MoveTo(dest TraceState) { + ms.getState().AssertMutable() + dest.getState().AssertMutable() + *dest.getOrig() = *ms.getOrig() + *ms.getOrig() = "" +} + +// CopyTo copies the TraceState instance overriding the destination. 
+func (ms TraceState) CopyTo(dest TraceState) { + dest.getState().AssertMutable() + *dest.getOrig() = *ms.getOrig() +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/traceid.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/traceid.go new file mode 100644 index 000000000..22ad5a5af --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/traceid.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon" + +import ( + "encoding/hex" + + "go.opentelemetry.io/collector/pdata/internal/data" +) + +var emptyTraceID = TraceID([16]byte{}) + +// TraceID is a trace identifier. +type TraceID [16]byte + +// NewTraceIDEmpty returns a new empty (all zero bytes) TraceID. +func NewTraceIDEmpty() TraceID { + return emptyTraceID +} + +// String returns string representation of the TraceID. +// +// Important: Don't rely on this method to get a string identifier of TraceID. +// Use hex.EncodeToString explicitly instead. +// This method meant to implement Stringer interface for display purposes only. +func (ms TraceID) String() string { + if ms.IsEmpty() { + return "" + } + return hex.EncodeToString(ms[:]) +} + +// IsEmpty returns true if id doesn't contain at least one non-zero byte. +func (ms TraceID) IsEmpty() bool { + return data.TraceID(ms).IsEmpty() +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go new file mode 100644 index 000000000..2e0443dd3 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go @@ -0,0 +1,489 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pcommon // import "go.opentelemetry.io/collector/pdata/pcommon" + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "math" + "strconv" + + "go.opentelemetry.io/collector/pdata/internal" + otlpcommon "go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1" +) + +// ValueType specifies the type of Value. +type ValueType int32 + +const ( + ValueTypeEmpty ValueType = iota + ValueTypeStr + ValueTypeInt + ValueTypeDouble + ValueTypeBool + ValueTypeMap + ValueTypeSlice + ValueTypeBytes +) + +// String returns the string representation of the ValueType. +func (avt ValueType) String() string { + switch avt { + case ValueTypeEmpty: + return "Empty" + case ValueTypeStr: + return "Str" + case ValueTypeBool: + return "Bool" + case ValueTypeInt: + return "Int" + case ValueTypeDouble: + return "Double" + case ValueTypeMap: + return "Map" + case ValueTypeSlice: + return "Slice" + case ValueTypeBytes: + return "Bytes" + } + return "" +} + +// Value is a mutable cell containing any value. Typically used as an element of Map or Slice. +// Must use one of NewValue+ functions below to create new instances. +// +// Intended to be passed by value since internally it is just a pointer to actual +// value representation. For the same reason passing by value and calling setters +// will modify the original, e.g.: +// +// func f1(val Value) { val.SetInt(234) } +// func f2() { +// v := NewValueStr("a string") +// f1(v) +// _ := v.Type() // this will return ValueTypeInt +// } +// +// Important: zero-initialized instance is not valid for use. All Value functions below must +// be called only on instances that are created via NewValue+ functions. +type Value internal.Value + +// NewValueEmpty creates a new Value with an empty value. 
+func NewValueEmpty() Value {
+ state := internal.StateMutable
+ return newValue(&otlpcommon.AnyValue{}, &state)
+}
+
+// NewValueStr creates a new Value with the given string value.
+func NewValueStr(v string) Value {
+ state := internal.StateMutable
+ return newValue(&otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_StringValue{StringValue: v}}, &state)
+}
+
+// NewValueInt creates a new Value with the given int64 value.
+func NewValueInt(v int64) Value {
+ state := internal.StateMutable
+ return newValue(&otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_IntValue{IntValue: v}}, &state)
+}
+
+// NewValueDouble creates a new Value with the given float64 value.
+func NewValueDouble(v float64) Value {
+ state := internal.StateMutable
+ return newValue(&otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_DoubleValue{DoubleValue: v}}, &state)
+}
+
+// NewValueBool creates a new Value with the given bool value.
+func NewValueBool(v bool) Value {
+ state := internal.StateMutable
+ return newValue(&otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_BoolValue{BoolValue: v}}, &state)
+}
+
+// NewValueMap creates a new Value of map type.
+func NewValueMap() Value {
+ state := internal.StateMutable
+ return newValue(&otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_KvlistValue{KvlistValue: &otlpcommon.KeyValueList{}}}, &state)
+}
+
+// NewValueSlice creates a new Value of array type.
+func NewValueSlice() Value {
+ state := internal.StateMutable
+ return newValue(&otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_ArrayValue{ArrayValue: &otlpcommon.ArrayValue{}}}, &state)
+}
+
+// NewValueBytes creates a new empty Value of byte type.
+func NewValueBytes() Value {
+ state := internal.StateMutable
+ return newValue(&otlpcommon.AnyValue{Value: &otlpcommon.AnyValue_BytesValue{BytesValue: nil}}, &state)
+}
+
+func newValue(orig *otlpcommon.AnyValue, state *internal.State) Value {
+ return Value(internal.NewValue(orig, state))
+}
+
+func (v Value) getOrig() *otlpcommon.AnyValue {
+ return internal.GetOrigValue(internal.Value(v))
+}
+
+func (v Value) getState() *internal.State {
+ return internal.GetValueState(internal.Value(v))
+}
+
+func (v Value) FromRaw(iv any) error {
+ switch tv := iv.(type) {
+ case nil:
+ v.getOrig().Value = nil
+ case string:
+ v.SetStr(tv)
+ case int:
+ v.SetInt(int64(tv))
+ case int8:
+ v.SetInt(int64(tv))
+ case int16:
+ v.SetInt(int64(tv))
+ case int32:
+ v.SetInt(int64(tv))
+ case int64:
+ v.SetInt(tv)
+ case uint:
+ v.SetInt(int64(tv))
+ case uint8:
+ v.SetInt(int64(tv))
+ case uint16:
+ v.SetInt(int64(tv))
+ case uint32:
+ v.SetInt(int64(tv))
+ case uint64:
+ v.SetInt(int64(tv))
+ case float32:
+ v.SetDouble(float64(tv))
+ case float64:
+ v.SetDouble(tv)
+ case bool:
+ v.SetBool(tv)
+ case []byte:
+ v.SetEmptyBytes().FromRaw(tv)
+ case map[string]any:
+ return v.SetEmptyMap().FromRaw(tv)
+ case []any:
+ return v.SetEmptySlice().FromRaw(tv)
+ default:
+ return fmt.Errorf("<Invalid value type %T>", tv)
+ }
+ return nil
+}
+
+// Type returns the type of the value for this Value.
+// Calling this function on zero-initialized Value will cause a panic.
+func (v Value) Type() ValueType { + switch v.getOrig().Value.(type) { + case *otlpcommon.AnyValue_StringValue: + return ValueTypeStr + case *otlpcommon.AnyValue_BoolValue: + return ValueTypeBool + case *otlpcommon.AnyValue_IntValue: + return ValueTypeInt + case *otlpcommon.AnyValue_DoubleValue: + return ValueTypeDouble + case *otlpcommon.AnyValue_KvlistValue: + return ValueTypeMap + case *otlpcommon.AnyValue_ArrayValue: + return ValueTypeSlice + case *otlpcommon.AnyValue_BytesValue: + return ValueTypeBytes + } + return ValueTypeEmpty +} + +// Str returns the string value associated with this Value. +// The shorter name is used instead of String to avoid implementing fmt.Stringer interface. +// If the Type() is not ValueTypeStr then returns empty string. +// Calling this function on zero-initialized Value will cause a panic. +func (v Value) Str() string { + return v.getOrig().GetStringValue() +} + +// Int returns the int64 value associated with this Value. +// If the Type() is not ValueTypeInt then returns int64(0). +// Calling this function on zero-initialized Value will cause a panic. +func (v Value) Int() int64 { + return v.getOrig().GetIntValue() +} + +// Double returns the float64 value associated with this Value. +// If the Type() is not ValueTypeDouble then returns float64(0). +// Calling this function on zero-initialized Value will cause a panic. +func (v Value) Double() float64 { + return v.getOrig().GetDoubleValue() +} + +// Bool returns the bool value associated with this Value. +// If the Type() is not ValueTypeBool then returns false. +// Calling this function on zero-initialized Value will cause a panic. +func (v Value) Bool() bool { + return v.getOrig().GetBoolValue() +} + +// Map returns the map value associated with this Value. +// If the Type() is not ValueTypeMap then returns an invalid map. Note that using +// such map can cause panic. +// +// Calling this function on zero-initialized Value will cause a panic. +func (v Value) Map() Map { + kvlist := v.getOrig().GetKvlistValue() + if kvlist == nil { + return Map{} + } + return newMap(&kvlist.Values, internal.GetValueState(internal.Value(v))) +} + +// Slice returns the slice value associated with this Value. +// If the Type() is not ValueTypeSlice then returns an invalid slice. Note that using +// such slice can cause panic. +// +// Calling this function on zero-initialized Value will cause a panic. +func (v Value) Slice() Slice { + arr := v.getOrig().GetArrayValue() + if arr == nil { + return Slice{} + } + return newSlice(&arr.Values, internal.GetValueState(internal.Value(v))) +} + +// Bytes returns the ByteSlice value associated with this Value. +// If the Type() is not ValueTypeBytes then returns an invalid ByteSlice object. Note that using +// such slice can cause panic. +// +// Calling this function on zero-initialized Value will cause a panic. +func (v Value) Bytes() ByteSlice { + bv, ok := v.getOrig().GetValue().(*otlpcommon.AnyValue_BytesValue) + if !ok { + return ByteSlice{} + } + return ByteSlice(internal.NewByteSlice(&bv.BytesValue, internal.GetValueState(internal.Value(v)))) +} + +// SetStr replaces the string value associated with this Value, +// it also changes the type to be ValueTypeStr. +// The shorter name is used instead of SetString to avoid implementing +// fmt.Stringer interface by the corresponding getter method. +// Calling this function on zero-initialized Value will cause a panic. 
+func (v Value) SetStr(sv string) { + v.getState().AssertMutable() + v.getOrig().Value = &otlpcommon.AnyValue_StringValue{StringValue: sv} +} + +// SetInt replaces the int64 value associated with this Value, +// it also changes the type to be ValueTypeInt. +// Calling this function on zero-initialized Value will cause a panic. +func (v Value) SetInt(iv int64) { + v.getState().AssertMutable() + v.getOrig().Value = &otlpcommon.AnyValue_IntValue{IntValue: iv} +} + +// SetDouble replaces the float64 value associated with this Value, +// it also changes the type to be ValueTypeDouble. +// Calling this function on zero-initialized Value will cause a panic. +func (v Value) SetDouble(dv float64) { + v.getState().AssertMutable() + v.getOrig().Value = &otlpcommon.AnyValue_DoubleValue{DoubleValue: dv} +} + +// SetBool replaces the bool value associated with this Value, +// it also changes the type to be ValueTypeBool. +// Calling this function on zero-initialized Value will cause a panic. +func (v Value) SetBool(bv bool) { + v.getState().AssertMutable() + v.getOrig().Value = &otlpcommon.AnyValue_BoolValue{BoolValue: bv} +} + +// SetEmptyBytes sets value to an empty byte slice and returns it. +// Calling this function on zero-initialized Value will cause a panic. +func (v Value) SetEmptyBytes() ByteSlice { + v.getState().AssertMutable() + bv := otlpcommon.AnyValue_BytesValue{BytesValue: nil} + v.getOrig().Value = &bv + return ByteSlice(internal.NewByteSlice(&bv.BytesValue, v.getState())) +} + +// SetEmptyMap sets value to an empty map and returns it. +// Calling this function on zero-initialized Value will cause a panic. +func (v Value) SetEmptyMap() Map { + v.getState().AssertMutable() + kv := &otlpcommon.AnyValue_KvlistValue{KvlistValue: &otlpcommon.KeyValueList{}} + v.getOrig().Value = kv + return newMap(&kv.KvlistValue.Values, v.getState()) +} + +// SetEmptySlice sets value to an empty slice and returns it. +// Calling this function on zero-initialized Value will cause a panic. +func (v Value) SetEmptySlice() Slice { + v.getState().AssertMutable() + av := &otlpcommon.AnyValue_ArrayValue{ArrayValue: &otlpcommon.ArrayValue{}} + v.getOrig().Value = av + return newSlice(&av.ArrayValue.Values, v.getState()) +} + +// CopyTo copies the Value instance overriding the destination. +func (v Value) CopyTo(dest Value) { + dest.getState().AssertMutable() + destOrig := dest.getOrig() + switch ov := v.getOrig().Value.(type) { + case *otlpcommon.AnyValue_KvlistValue: + kv, ok := destOrig.Value.(*otlpcommon.AnyValue_KvlistValue) + if !ok { + kv = &otlpcommon.AnyValue_KvlistValue{KvlistValue: &otlpcommon.KeyValueList{}} + destOrig.Value = kv + } + if ov.KvlistValue == nil { + kv.KvlistValue = nil + return + } + // Deep copy to dest. + newMap(&ov.KvlistValue.Values, v.getState()).CopyTo(newMap(&kv.KvlistValue.Values, dest.getState())) + case *otlpcommon.AnyValue_ArrayValue: + av, ok := destOrig.Value.(*otlpcommon.AnyValue_ArrayValue) + if !ok { + av = &otlpcommon.AnyValue_ArrayValue{ArrayValue: &otlpcommon.ArrayValue{}} + destOrig.Value = av + } + if ov.ArrayValue == nil { + av.ArrayValue = nil + return + } + // Deep copy to dest. 
+ newSlice(&ov.ArrayValue.Values, v.getState()).CopyTo(newSlice(&av.ArrayValue.Values, dest.getState()))
+ case *otlpcommon.AnyValue_BytesValue:
+ bv, ok := destOrig.Value.(*otlpcommon.AnyValue_BytesValue)
+ if !ok {
+ bv = &otlpcommon.AnyValue_BytesValue{}
+ destOrig.Value = bv
+ }
+ bv.BytesValue = make([]byte, len(ov.BytesValue))
+ copy(bv.BytesValue, ov.BytesValue)
+ default:
+ // Primitive immutable type, no need for deep copy.
+ destOrig.Value = ov
+ }
+}
+
+// AsString converts an OTLP Value object of any type to its equivalent string
+// representation. This differs from Str which only returns a non-empty value
+// if the ValueType is ValueTypeStr.
+func (v Value) AsString() string {
+ switch v.Type() {
+ case ValueTypeEmpty:
+ return ""
+
+ case ValueTypeStr:
+ return v.Str()
+
+ case ValueTypeBool:
+ return strconv.FormatBool(v.Bool())
+
+ case ValueTypeDouble:
+ return float64AsString(v.Double())
+
+ case ValueTypeInt:
+ return strconv.FormatInt(v.Int(), 10)
+
+ case ValueTypeMap:
+ jsonStr, _ := json.Marshal(v.Map().AsRaw())
+ return string(jsonStr)
+
+ case ValueTypeBytes:
+ return base64.StdEncoding.EncodeToString(*v.Bytes().getOrig())
+
+ case ValueTypeSlice:
+ jsonStr, _ := json.Marshal(v.Slice().AsRaw())
+ return string(jsonStr)
+
+ default:
+ return fmt.Sprintf("<Unknown OpenTelemetry attribute value type %q>", v.Type())
+ }
+}
+
+// See https://cs.opensource.google/go/go/+/refs/tags/go1.17.7:src/encoding/json/encode.go;l=585.
+// This allows us to avoid using reflection.
+func float64AsString(f float64) string {
+ if math.IsInf(f, 0) || math.IsNaN(f) {
+ return fmt.Sprintf("json: unsupported value: %s", strconv.FormatFloat(f, 'g', -1, 64))
+ }
+
+ // Convert as if by ES6 number to string conversion.
+ // This matches most other JSON generators.
+ // See golang.org/issue/6384 and golang.org/issue/14135.
+ // Like fmt %g, but the exponent cutoffs are different
+ // and exponents themselves are not padded to two digits.
+ scratch := [64]byte{}
+ b := scratch[:0]
+ abs := math.Abs(f)
+ fmt := byte('f')
+ if abs != 0 && (abs < 1e-6 || abs >= 1e21) {
+ fmt = 'e'
+ }
+ b = strconv.AppendFloat(b, f, fmt, -1, 64)
+ if fmt == 'e' {
+ // clean up e-09 to e-9
+ n := len(b)
+ if n >= 4 && b[n-4] == 'e' && b[n-3] == '-' && b[n-2] == '0' {
+ b[n-2] = b[n-1]
+ b = b[:n-1]
+ }
+ }
+ return string(b)
+}
+
+func (v Value) AsRaw() any {
+ switch v.Type() {
+ case ValueTypeEmpty:
+ return nil
+ case ValueTypeStr:
+ return v.Str()
+ case ValueTypeBool:
+ return v.Bool()
+ case ValueTypeDouble:
+ return v.Double()
+ case ValueTypeInt:
+ return v.Int()
+ case ValueTypeBytes:
+ return v.Bytes().AsRaw()
+ case ValueTypeMap:
+ return v.Map().AsRaw()
+ case ValueTypeSlice:
+ return v.Slice().AsRaw()
+ }
+ return fmt.Sprintf("<Unknown OpenTelemetry value type %q>", v.Type())
+}
+
+func newKeyValueString(k string, v string) otlpcommon.KeyValue {
+ orig := otlpcommon.KeyValue{Key: k}
+ state := internal.StateMutable
+ akv := newValue(&orig.Value, &state)
+ akv.SetStr(v)
+ return orig
+}
+
+func newKeyValueInt(k string, v int64) otlpcommon.KeyValue {
+ orig := otlpcommon.KeyValue{Key: k}
+ state := internal.StateMutable
+ akv := newValue(&orig.Value, &state)
+ akv.SetInt(v)
+ return orig
+}
+
+func newKeyValueDouble(k string, v float64) otlpcommon.KeyValue {
+ orig := otlpcommon.KeyValue{Key: k}
+ state := internal.StateMutable
+ akv := newValue(&orig.Value, &state)
+ akv.SetDouble(v)
+ return orig
+}
+
+func newKeyValueBool(k string, v bool) otlpcommon.KeyValue {
+ orig := otlpcommon.KeyValue{Key: k}
+ state := internal.StateMutable
+ akv := newValue(&orig.Value, &state)
+ akv.SetBool(v)
+ return orig
+}
diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/aggregation_temporality.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/aggregation_temporality.go
new file mode 100644
index 000000000..0ba2b28c2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/aggregation_temporality.go
@@ -0,0 +1,34 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric"
+
+import (
+ otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1"
+)
+
+// AggregationTemporality defines how a metric aggregator reports aggregated values.
+// It describes how those values relate to the time interval over which they are aggregated.
+type AggregationTemporality int32
+
+const (
+ // AggregationTemporalityUnspecified is the default AggregationTemporality, it MUST NOT be used.
+ AggregationTemporalityUnspecified = AggregationTemporality(otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED)
+ // AggregationTemporalityDelta is a AggregationTemporality for a metric aggregator which reports changes since last report time.
+ AggregationTemporalityDelta = AggregationTemporality(otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA)
+ // AggregationTemporalityCumulative is a AggregationTemporality for a metric aggregator which reports changes since a fixed start time.
+ AggregationTemporalityCumulative = AggregationTemporality(otlpmetrics.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE)
+)
+
+// String returns the string representation of the AggregationTemporality.
+func (at AggregationTemporality) String() string { + switch at { + case AggregationTemporalityUnspecified: + return "Unspecified" + case AggregationTemporalityDelta: + return "Delta" + case AggregationTemporalityCumulative: + return "Cumulative" + } + return "" +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/encoding.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/encoding.go new file mode 100644 index 000000000..5bd7d5a58 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/encoding.go @@ -0,0 +1,30 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric" + +// MarshalSizer is the interface that groups the basic Marshal and Size methods +type MarshalSizer interface { + Marshaler + Sizer +} + +// Marshaler marshals pmetric.Metrics into bytes. +type Marshaler interface { + // MarshalMetrics the given pmetric.Metrics into bytes. + // If the error is not nil, the returned bytes slice cannot be used. + MarshalMetrics(md Metrics) ([]byte, error) +} + +// Unmarshaler unmarshalls bytes into pmetric.Metrics. +type Unmarshaler interface { + // UnmarshalMetrics the given bytes into pmetric.Metrics. + // If the error is not nil, the returned pmetric.Metrics cannot be used. + UnmarshalMetrics(buf []byte) (Metrics, error) +} + +// Sizer is an optional interface implemented by the Marshaler, that calculates the size of a marshaled Metrics. +type Sizer interface { + // MetricsSize returns the size in bytes of a marshaled Metrics. + MetricsSize(md Metrics) int +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/exemplar_value_type.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/exemplar_value_type.go new file mode 100644 index 000000000..3faf242a6 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/exemplar_value_type.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric" + +// ExemplarValueType specifies the type of Exemplar measurement value. +type ExemplarValueType int32 + +const ( + // ExemplarValueTypeEmpty means that exemplar value is unset. + ExemplarValueTypeEmpty ExemplarValueType = iota + ExemplarValueTypeInt + ExemplarValueTypeDouble +) + +// String returns the string representation of the ExemplarValueType. +func (nt ExemplarValueType) String() string { + switch nt { + case ExemplarValueTypeEmpty: + return "Empty" + case ExemplarValueTypeInt: + return "Int" + case ExemplarValueTypeDouble: + return "Double" + } + return "" +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplar.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplar.go new file mode 100644 index 000000000..9937a1b50 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplar.go @@ -0,0 +1,143 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetric + +import ( + "go.opentelemetry.io/collector/pdata/internal" + "go.opentelemetry.io/collector/pdata/internal/data" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// Exemplar is a sample input double measurement. 
+// +// Exemplars also hold information about the environment when the measurement was recorded, +// for example the span and trace ID of the active span when the exemplar was recorded. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewExemplar function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Exemplar struct { + orig *otlpmetrics.Exemplar + state *internal.State +} + +func newExemplar(orig *otlpmetrics.Exemplar, state *internal.State) Exemplar { + return Exemplar{orig: orig, state: state} +} + +// NewExemplar creates a new empty Exemplar. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewExemplar() Exemplar { + state := internal.StateMutable + return newExemplar(&otlpmetrics.Exemplar{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Exemplar) MoveTo(dest Exemplar) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpmetrics.Exemplar{} +} + +// Timestamp returns the timestamp associated with this Exemplar. +func (ms Exemplar) Timestamp() pcommon.Timestamp { + return pcommon.Timestamp(ms.orig.TimeUnixNano) +} + +// SetTimestamp replaces the timestamp associated with this Exemplar. +func (ms Exemplar) SetTimestamp(v pcommon.Timestamp) { + ms.state.AssertMutable() + ms.orig.TimeUnixNano = uint64(v) +} + +// ValueType returns the type of the value for this Exemplar. +// Calling this function on zero-initialized Exemplar will cause a panic. +func (ms Exemplar) ValueType() ExemplarValueType { + switch ms.orig.Value.(type) { + case *otlpmetrics.Exemplar_AsDouble: + return ExemplarValueTypeDouble + case *otlpmetrics.Exemplar_AsInt: + return ExemplarValueTypeInt + } + return ExemplarValueTypeEmpty +} + +// DoubleValue returns the double associated with this Exemplar. +func (ms Exemplar) DoubleValue() float64 { + return ms.orig.GetAsDouble() +} + +// SetDoubleValue replaces the double associated with this Exemplar. +func (ms Exemplar) SetDoubleValue(v float64) { + ms.state.AssertMutable() + ms.orig.Value = &otlpmetrics.Exemplar_AsDouble{ + AsDouble: v, + } +} + +// IntValue returns the int associated with this Exemplar. +func (ms Exemplar) IntValue() int64 { + return ms.orig.GetAsInt() +} + +// SetIntValue replaces the int associated with this Exemplar. +func (ms Exemplar) SetIntValue(v int64) { + ms.state.AssertMutable() + ms.orig.Value = &otlpmetrics.Exemplar_AsInt{ + AsInt: v, + } +} + +// FilteredAttributes returns the FilteredAttributes associated with this Exemplar. +func (ms Exemplar) FilteredAttributes() pcommon.Map { + return pcommon.Map(internal.NewMap(&ms.orig.FilteredAttributes, ms.state)) +} + +// TraceID returns the traceid associated with this Exemplar. +func (ms Exemplar) TraceID() pcommon.TraceID { + return pcommon.TraceID(ms.orig.TraceId) +} + +// SetTraceID replaces the traceid associated with this Exemplar. +func (ms Exemplar) SetTraceID(v pcommon.TraceID) { + ms.state.AssertMutable() + ms.orig.TraceId = data.TraceID(v) +} + +// SpanID returns the spanid associated with this Exemplar. +func (ms Exemplar) SpanID() pcommon.SpanID { + return pcommon.SpanID(ms.orig.SpanId) +} + +// SetSpanID replaces the spanid associated with this Exemplar. 
+func (ms Exemplar) SetSpanID(v pcommon.SpanID) { + ms.state.AssertMutable() + ms.orig.SpanId = data.SpanID(v) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms Exemplar) CopyTo(dest Exemplar) { + dest.state.AssertMutable() + dest.SetTimestamp(ms.Timestamp()) + switch ms.ValueType() { + case ExemplarValueTypeDouble: + dest.SetDoubleValue(ms.DoubleValue()) + case ExemplarValueTypeInt: + dest.SetIntValue(ms.IntValue()) + } + + ms.FilteredAttributes().CopyTo(dest.FilteredAttributes()) + dest.SetTraceID(ms.TraceID()) + dest.SetSpanID(ms.SpanID()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplarslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplarslice.go new file mode 100644 index 000000000..733341c36 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exemplarslice.go @@ -0,0 +1,137 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetric + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" +) + +// ExemplarSlice logically represents a slice of Exemplar. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewExemplarSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ExemplarSlice struct { + orig *[]otlpmetrics.Exemplar + state *internal.State +} + +func newExemplarSlice(orig *[]otlpmetrics.Exemplar, state *internal.State) ExemplarSlice { + return ExemplarSlice{orig: orig, state: state} +} + +// NewExemplarSlice creates a ExemplarSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewExemplarSlice() ExemplarSlice { + orig := []otlpmetrics.Exemplar(nil) + state := internal.StateMutable + return newExemplarSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewExemplarSlice()". +func (es ExemplarSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es ExemplarSlice) At(i int) Exemplar { + return newExemplar(&(*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new ExemplarSlice can be initialized: +// +// es := NewExemplarSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es ExemplarSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]otlpmetrics.Exemplar, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty Exemplar. +// It returns the newly added Exemplar. 
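For orientation, a test-style sketch of filling in an Exemplar with the accessors above. NewExemplar is the testing-only constructor; in pipeline code exemplars come from ExemplarSlice.AppendEmpty. pcommon.NewTimestampFromTime and Map.PutStr are assumed from the sibling pcommon package, which is outside this hunk.

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func exampleExemplar() pmetric.Exemplar {
	ex := pmetric.NewExemplar() // testing constructor; prefer ExemplarSlice.AppendEmpty in pipelines
	ex.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	ex.SetIntValue(17)                                   // ValueType() now reports ExemplarValueTypeInt
	ex.FilteredAttributes().PutStr("sampled.by", "demo") // attributes not kept on the data point itself
	ex.SetTraceID(pcommon.TraceID([16]byte{1, 2, 3, 4}))
	ex.SetSpanID(pcommon.SpanID([8]byte{5, 6, 7, 8}))
	return ex
}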
+func (es ExemplarSlice) AppendEmpty() Exemplar { + es.state.AssertMutable() + *es.orig = append(*es.orig, otlpmetrics.Exemplar{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ExemplarSlice) MoveAndAppendTo(dest ExemplarSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es ExemplarSlice) RemoveIf(f func(Exemplar) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + // TODO: Prevent memory leak by erasing truncated values. + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es ExemplarSlice) CopyTo(dest ExemplarSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + } else { + (*dest.orig) = make([]otlpmetrics.Exemplar, srcLen) + } + for i := range *es.orig { + newExemplar(&(*es.orig)[i], es.state).CopyTo(newExemplar(&(*dest.orig)[i], dest.state)) + } +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogram.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogram.go new file mode 100644 index 000000000..18ba20d73 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogram.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetric + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" +) + +// ExponentialHistogram represents the type of a metric that is calculated by aggregating +// as a ExponentialHistogram of all reported double measurements over a time interval. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewExponentialHistogram function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ExponentialHistogram struct { + orig *otlpmetrics.ExponentialHistogram + state *internal.State +} + +func newExponentialHistogram(orig *otlpmetrics.ExponentialHistogram, state *internal.State) ExponentialHistogram { + return ExponentialHistogram{orig: orig, state: state} +} + +// NewExponentialHistogram creates a new empty ExponentialHistogram. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. 
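A minimal sketch of the slice workflow these generated methods are meant for: reserve capacity, append, then prune. Only the API shown above is used; the values are illustrative.

import "go.opentelemetry.io/collector/pdata/pmetric"

func fillExemplars(es pmetric.ExemplarSlice, values []int64) {
	es.EnsureCapacity(len(values)) // one allocation up front instead of growing per append
	for _, v := range values {
		es.AppendEmpty().SetIntValue(v)
	}
	// Drop exemplars whose value was never set.
	es.RemoveIf(func(e pmetric.Exemplar) bool {
		return e.ValueType() == pmetric.ExemplarValueTypeEmpty
	})
}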
+func NewExponentialHistogram() ExponentialHistogram { + state := internal.StateMutable + return newExponentialHistogram(&otlpmetrics.ExponentialHistogram{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ExponentialHistogram) MoveTo(dest ExponentialHistogram) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpmetrics.ExponentialHistogram{} +} + +// AggregationTemporality returns the aggregationtemporality associated with this ExponentialHistogram. +func (ms ExponentialHistogram) AggregationTemporality() AggregationTemporality { + return AggregationTemporality(ms.orig.AggregationTemporality) +} + +// SetAggregationTemporality replaces the aggregationtemporality associated with this ExponentialHistogram. +func (ms ExponentialHistogram) SetAggregationTemporality(v AggregationTemporality) { + ms.state.AssertMutable() + ms.orig.AggregationTemporality = otlpmetrics.AggregationTemporality(v) +} + +// DataPoints returns the DataPoints associated with this ExponentialHistogram. +func (ms ExponentialHistogram) DataPoints() ExponentialHistogramDataPointSlice { + return newExponentialHistogramDataPointSlice(&ms.orig.DataPoints, ms.state) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ExponentialHistogram) CopyTo(dest ExponentialHistogram) { + dest.state.AssertMutable() + dest.SetAggregationTemporality(ms.AggregationTemporality()) + ms.DataPoints().CopyTo(dest.DataPoints()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapoint.go new file mode 100644 index 000000000..859a08b28 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapoint.go @@ -0,0 +1,232 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetric + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the +// time-varying values of a ExponentialHistogram of double values. A ExponentialHistogram contains +// summary statistics for a population of values, it may optionally contain the +// distribution of those values across a set of buckets. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewExponentialHistogramDataPoint function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ExponentialHistogramDataPoint struct { + orig *otlpmetrics.ExponentialHistogramDataPoint + state *internal.State +} + +func newExponentialHistogramDataPoint(orig *otlpmetrics.ExponentialHistogramDataPoint, state *internal.State) ExponentialHistogramDataPoint { + return ExponentialHistogramDataPoint{orig: orig, state: state} +} + +// NewExponentialHistogramDataPoint creates a new empty ExponentialHistogramDataPoint. +// +// This must be used only in testing code. 
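ExponentialHistogram itself carries only the aggregation temporality and its data-point slice; the interesting fields live on the points. A short test-style sketch using just the accessors defined in this hunk (NewExponentialHistogram is the testing constructor; in real data the struct hangs off Metric.SetEmptyExponentialHistogram, defined further down in this patch):

import "go.opentelemetry.io/collector/pdata/pmetric"

func exampleExpHistogram() pmetric.ExponentialHistogram {
	eh := pmetric.NewExponentialHistogram()
	eh.SetAggregationTemporality(pmetric.AggregationTemporalityDelta)
	dp := eh.DataPoints().AppendEmpty()
	dp.SetScale(3) // bucket base = 2^(2^-3), roughly 1.09
	dp.SetZeroCount(0)
	dp.SetCount(0)
	return eh
}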
Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewExponentialHistogramDataPoint() ExponentialHistogramDataPoint { + state := internal.StateMutable + return newExponentialHistogramDataPoint(&otlpmetrics.ExponentialHistogramDataPoint{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ExponentialHistogramDataPoint) MoveTo(dest ExponentialHistogramDataPoint) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpmetrics.ExponentialHistogramDataPoint{} +} + +// Attributes returns the Attributes associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) Attributes() pcommon.Map { + return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) +} + +// StartTimestamp returns the starttimestamp associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) StartTimestamp() pcommon.Timestamp { + return pcommon.Timestamp(ms.orig.StartTimeUnixNano) +} + +// SetStartTimestamp replaces the starttimestamp associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) SetStartTimestamp(v pcommon.Timestamp) { + ms.state.AssertMutable() + ms.orig.StartTimeUnixNano = uint64(v) +} + +// Timestamp returns the timestamp associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) Timestamp() pcommon.Timestamp { + return pcommon.Timestamp(ms.orig.TimeUnixNano) +} + +// SetTimestamp replaces the timestamp associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) SetTimestamp(v pcommon.Timestamp) { + ms.state.AssertMutable() + ms.orig.TimeUnixNano = uint64(v) +} + +// Count returns the count associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) Count() uint64 { + return ms.orig.Count +} + +// SetCount replaces the count associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) SetCount(v uint64) { + ms.state.AssertMutable() + ms.orig.Count = v +} + +// Scale returns the scale associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) Scale() int32 { + return ms.orig.Scale +} + +// SetScale replaces the scale associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) SetScale(v int32) { + ms.state.AssertMutable() + ms.orig.Scale = v +} + +// ZeroCount returns the zerocount associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) ZeroCount() uint64 { + return ms.orig.ZeroCount +} + +// SetZeroCount replaces the zerocount associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) SetZeroCount(v uint64) { + ms.state.AssertMutable() + ms.orig.ZeroCount = v +} + +// Positive returns the positive associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) Positive() ExponentialHistogramDataPointBuckets { + return newExponentialHistogramDataPointBuckets(&ms.orig.Positive, ms.state) +} + +// Negative returns the negative associated with this ExponentialHistogramDataPoint. 
+func (ms ExponentialHistogramDataPoint) Negative() ExponentialHistogramDataPointBuckets { + return newExponentialHistogramDataPointBuckets(&ms.orig.Negative, ms.state) +} + +// Exemplars returns the Exemplars associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) Exemplars() ExemplarSlice { + return newExemplarSlice(&ms.orig.Exemplars, ms.state) +} + +// Flags returns the flags associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) Flags() DataPointFlags { + return DataPointFlags(ms.orig.Flags) +} + +// SetFlags replaces the flags associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) SetFlags(v DataPointFlags) { + ms.state.AssertMutable() + ms.orig.Flags = uint32(v) +} + +// Sum returns the sum associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) Sum() float64 { + return ms.orig.GetSum() +} + +// HasSum returns true if the ExponentialHistogramDataPoint contains a +// Sum value, false otherwise. +func (ms ExponentialHistogramDataPoint) HasSum() bool { + return ms.orig.Sum_ != nil +} + +// SetSum replaces the sum associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) SetSum(v float64) { + ms.state.AssertMutable() + ms.orig.Sum_ = &otlpmetrics.ExponentialHistogramDataPoint_Sum{Sum: v} +} + +// RemoveSum removes the sum associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) RemoveSum() { + ms.state.AssertMutable() + ms.orig.Sum_ = nil +} + +// Min returns the min associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) Min() float64 { + return ms.orig.GetMin() +} + +// HasMin returns true if the ExponentialHistogramDataPoint contains a +// Min value, false otherwise. +func (ms ExponentialHistogramDataPoint) HasMin() bool { + return ms.orig.Min_ != nil +} + +// SetMin replaces the min associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) SetMin(v float64) { + ms.state.AssertMutable() + ms.orig.Min_ = &otlpmetrics.ExponentialHistogramDataPoint_Min{Min: v} +} + +// RemoveMin removes the min associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) RemoveMin() { + ms.state.AssertMutable() + ms.orig.Min_ = nil +} + +// Max returns the max associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) Max() float64 { + return ms.orig.GetMax() +} + +// HasMax returns true if the ExponentialHistogramDataPoint contains a +// Max value, false otherwise. +func (ms ExponentialHistogramDataPoint) HasMax() bool { + return ms.orig.Max_ != nil +} + +// SetMax replaces the max associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) SetMax(v float64) { + ms.state.AssertMutable() + ms.orig.Max_ = &otlpmetrics.ExponentialHistogramDataPoint_Max{Max: v} +} + +// RemoveMax removes the max associated with this ExponentialHistogramDataPoint. +func (ms ExponentialHistogramDataPoint) RemoveMax() { + ms.state.AssertMutable() + ms.orig.Max_ = nil +} + +// CopyTo copies all properties from the current struct overriding the destination. 
+func (ms ExponentialHistogramDataPoint) CopyTo(dest ExponentialHistogramDataPoint) { + dest.state.AssertMutable() + ms.Attributes().CopyTo(dest.Attributes()) + dest.SetStartTimestamp(ms.StartTimestamp()) + dest.SetTimestamp(ms.Timestamp()) + dest.SetCount(ms.Count()) + dest.SetScale(ms.Scale()) + dest.SetZeroCount(ms.ZeroCount()) + ms.Positive().CopyTo(dest.Positive()) + ms.Negative().CopyTo(dest.Negative()) + ms.Exemplars().CopyTo(dest.Exemplars()) + dest.SetFlags(ms.Flags()) + if ms.HasSum() { + dest.SetSum(ms.Sum()) + } + + if ms.HasMin() { + dest.SetMin(ms.Min()) + } + + if ms.HasMax() { + dest.SetMax(ms.Max()) + } + +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointbuckets.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointbuckets.go new file mode 100644 index 000000000..7acfbc627 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointbuckets.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetric + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// ExponentialHistogramDataPointBuckets are a set of bucket counts, encoded in a contiguous array of counts. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewExponentialHistogramDataPointBuckets function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ExponentialHistogramDataPointBuckets struct { + orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets + state *internal.State +} + +func newExponentialHistogramDataPointBuckets(orig *otlpmetrics.ExponentialHistogramDataPoint_Buckets, state *internal.State) ExponentialHistogramDataPointBuckets { + return ExponentialHistogramDataPointBuckets{orig: orig, state: state} +} + +// NewExponentialHistogramDataPointBuckets creates a new empty ExponentialHistogramDataPointBuckets. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewExponentialHistogramDataPointBuckets() ExponentialHistogramDataPointBuckets { + state := internal.StateMutable + return newExponentialHistogramDataPointBuckets(&otlpmetrics.ExponentialHistogramDataPoint_Buckets{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ExponentialHistogramDataPointBuckets) MoveTo(dest ExponentialHistogramDataPointBuckets) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpmetrics.ExponentialHistogramDataPoint_Buckets{} +} + +// Offset returns the offset associated with this ExponentialHistogramDataPointBuckets. +func (ms ExponentialHistogramDataPointBuckets) Offset() int32 { + return ms.orig.Offset +} + +// SetOffset replaces the offset associated with this ExponentialHistogramDataPointBuckets. 
+func (ms ExponentialHistogramDataPointBuckets) SetOffset(v int32) { + ms.state.AssertMutable() + ms.orig.Offset = v +} + +// BucketCounts returns the bucketcounts associated with this ExponentialHistogramDataPointBuckets. +func (ms ExponentialHistogramDataPointBuckets) BucketCounts() pcommon.UInt64Slice { + return pcommon.UInt64Slice(internal.NewUInt64Slice(&ms.orig.BucketCounts, ms.state)) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ExponentialHistogramDataPointBuckets) CopyTo(dest ExponentialHistogramDataPointBuckets) { + dest.state.AssertMutable() + dest.SetOffset(ms.Offset()) + ms.BucketCounts().CopyTo(dest.BucketCounts()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointslice.go new file mode 100644 index 000000000..94ec52656 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_exponentialhistogramdatapointslice.go @@ -0,0 +1,153 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetric + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" +) + +// ExponentialHistogramDataPointSlice logically represents a slice of ExponentialHistogramDataPoint. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewExponentialHistogramDataPointSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ExponentialHistogramDataPointSlice struct { + orig *[]*otlpmetrics.ExponentialHistogramDataPoint + state *internal.State +} + +func newExponentialHistogramDataPointSlice(orig *[]*otlpmetrics.ExponentialHistogramDataPoint, state *internal.State) ExponentialHistogramDataPointSlice { + return ExponentialHistogramDataPointSlice{orig: orig, state: state} +} + +// NewExponentialHistogramDataPointSlice creates a ExponentialHistogramDataPointSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewExponentialHistogramDataPointSlice() ExponentialHistogramDataPointSlice { + orig := []*otlpmetrics.ExponentialHistogramDataPoint(nil) + state := internal.StateMutable + return newExponentialHistogramDataPointSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewExponentialHistogramDataPointSlice()". +func (es ExponentialHistogramDataPointSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es ExponentialHistogramDataPointSlice) At(i int) ExponentialHistogramDataPoint { + return newExponentialHistogramDataPoint((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. 
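The positive and negative bucket sets are offset-indexed counters. A sketch of populating one on an existing data point; pcommon.UInt64Slice.FromRaw is assumed from the sibling pcommon package (not part of this hunk), and the numbers are purely illustrative.

import "go.opentelemetry.io/collector/pdata/pmetric"

func fillBuckets(dp pmetric.ExponentialHistogramDataPoint) {
	pos := dp.Positive()
	pos.SetOffset(-2) // first bucket covers base^-2 .. base^-1
	pos.BucketCounts().FromRaw([]uint64{3, 0, 7, 1})
	dp.SetCount(11) // 3+0+7+1, plus the zero count if any
	dp.SetZeroCount(0)
	dp.SetSum(42.5) // optional field: HasSum() becomes true
}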
+// +// Here is how a new ExponentialHistogramDataPointSlice can be initialized: +// +// es := NewExponentialHistogramDataPointSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es ExponentialHistogramDataPointSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpmetrics.ExponentialHistogramDataPoint, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty ExponentialHistogramDataPoint. +// It returns the newly added ExponentialHistogramDataPoint. +func (es ExponentialHistogramDataPointSlice) AppendEmpty() ExponentialHistogramDataPoint { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpmetrics.ExponentialHistogramDataPoint{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ExponentialHistogramDataPointSlice) MoveAndAppendTo(dest ExponentialHistogramDataPointSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es ExponentialHistogramDataPointSlice) RemoveIf(f func(ExponentialHistogramDataPoint) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + // TODO: Prevent memory leak by erasing truncated values. + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es ExponentialHistogramDataPointSlice) CopyTo(dest ExponentialHistogramDataPointSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newExponentialHistogramDataPoint((*es.orig)[i], es.state).CopyTo(newExponentialHistogramDataPoint((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpmetrics.ExponentialHistogramDataPoint, srcLen) + wrappers := make([]*otlpmetrics.ExponentialHistogramDataPoint, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newExponentialHistogramDataPoint((*es.orig)[i], es.state).CopyTo(newExponentialHistogramDataPoint(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the ExponentialHistogramDataPoint elements within ExponentialHistogramDataPointSlice given the +// provided less function so that two instances of ExponentialHistogramDataPointSlice +// can be compared. 
+func (es ExponentialHistogramDataPointSlice) Sort(less func(a, b ExponentialHistogramDataPoint) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_gauge.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_gauge.go new file mode 100644 index 000000000..ba572f9ca --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_gauge.go @@ -0,0 +1,57 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetric + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" +) + +// Gauge represents the type of a numeric metric that always exports the "current value" for every data point. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewGauge function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Gauge struct { + orig *otlpmetrics.Gauge + state *internal.State +} + +func newGauge(orig *otlpmetrics.Gauge, state *internal.State) Gauge { + return Gauge{orig: orig, state: state} +} + +// NewGauge creates a new empty Gauge. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewGauge() Gauge { + state := internal.StateMutable + return newGauge(&otlpmetrics.Gauge{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Gauge) MoveTo(dest Gauge) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpmetrics.Gauge{} +} + +// DataPoints returns the DataPoints associated with this Gauge. +func (ms Gauge) DataPoints() NumberDataPointSlice { + return newNumberDataPointSlice(&ms.orig.DataPoints, ms.state) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms Gauge) CopyTo(dest Gauge) { + dest.state.AssertMutable() + ms.DataPoints().CopyTo(dest.DataPoints()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogram.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogram.go new file mode 100644 index 000000000..950fb1312 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogram.go @@ -0,0 +1,69 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetric + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" +) + +// Histogram represents the type of a metric that is calculated by aggregating as a Histogram of all reported measurements over a time interval. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewHistogram function to create new instances. +// Important: zero-initialized instance is not valid for use. 
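A Gauge is just a slice of NumberDataPoints. The sketch below assumes NumberDataPointSlice.AppendEmpty, which is generated in a sibling file following the same slice pattern as the slices in this hunk, and pcommon.NewTimestampFromTime from the pcommon package.

import (
	"time"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func exampleGauge() pmetric.Gauge {
	g := pmetric.NewGauge()
	dp := g.DataPoints().AppendEmpty() // assumed: same generated-slice pattern as above
	dp.SetTimestamp(pcommon.NewTimestampFromTime(time.Now()))
	dp.SetDoubleValue(0.73)
	return g
}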
+type Histogram struct { + orig *otlpmetrics.Histogram + state *internal.State +} + +func newHistogram(orig *otlpmetrics.Histogram, state *internal.State) Histogram { + return Histogram{orig: orig, state: state} +} + +// NewHistogram creates a new empty Histogram. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewHistogram() Histogram { + state := internal.StateMutable + return newHistogram(&otlpmetrics.Histogram{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Histogram) MoveTo(dest Histogram) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpmetrics.Histogram{} +} + +// AggregationTemporality returns the aggregationtemporality associated with this Histogram. +func (ms Histogram) AggregationTemporality() AggregationTemporality { + return AggregationTemporality(ms.orig.AggregationTemporality) +} + +// SetAggregationTemporality replaces the aggregationtemporality associated with this Histogram. +func (ms Histogram) SetAggregationTemporality(v AggregationTemporality) { + ms.state.AssertMutable() + ms.orig.AggregationTemporality = otlpmetrics.AggregationTemporality(v) +} + +// DataPoints returns the DataPoints associated with this Histogram. +func (ms Histogram) DataPoints() HistogramDataPointSlice { + return newHistogramDataPointSlice(&ms.orig.DataPoints, ms.state) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms Histogram) CopyTo(dest Histogram) { + dest.state.AssertMutable() + dest.SetAggregationTemporality(ms.AggregationTemporality()) + ms.DataPoints().CopyTo(dest.DataPoints()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapoint.go new file mode 100644 index 000000000..22dc32344 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapoint.go @@ -0,0 +1,205 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetric + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// HistogramDataPoint is a single data point in a timeseries that describes the time-varying values of a Histogram of values. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewHistogramDataPoint function to create new instances. +// Important: zero-initialized instance is not valid for use. +type HistogramDataPoint struct { + orig *otlpmetrics.HistogramDataPoint + state *internal.State +} + +func newHistogramDataPoint(orig *otlpmetrics.HistogramDataPoint, state *internal.State) HistogramDataPoint { + return HistogramDataPoint{orig: orig, state: state} +} + +// NewHistogramDataPoint creates a new empty HistogramDataPoint. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. 
+func NewHistogramDataPoint() HistogramDataPoint { + state := internal.StateMutable + return newHistogramDataPoint(&otlpmetrics.HistogramDataPoint{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms HistogramDataPoint) MoveTo(dest HistogramDataPoint) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpmetrics.HistogramDataPoint{} +} + +// Attributes returns the Attributes associated with this HistogramDataPoint. +func (ms HistogramDataPoint) Attributes() pcommon.Map { + return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) +} + +// StartTimestamp returns the starttimestamp associated with this HistogramDataPoint. +func (ms HistogramDataPoint) StartTimestamp() pcommon.Timestamp { + return pcommon.Timestamp(ms.orig.StartTimeUnixNano) +} + +// SetStartTimestamp replaces the starttimestamp associated with this HistogramDataPoint. +func (ms HistogramDataPoint) SetStartTimestamp(v pcommon.Timestamp) { + ms.state.AssertMutable() + ms.orig.StartTimeUnixNano = uint64(v) +} + +// Timestamp returns the timestamp associated with this HistogramDataPoint. +func (ms HistogramDataPoint) Timestamp() pcommon.Timestamp { + return pcommon.Timestamp(ms.orig.TimeUnixNano) +} + +// SetTimestamp replaces the timestamp associated with this HistogramDataPoint. +func (ms HistogramDataPoint) SetTimestamp(v pcommon.Timestamp) { + ms.state.AssertMutable() + ms.orig.TimeUnixNano = uint64(v) +} + +// Count returns the count associated with this HistogramDataPoint. +func (ms HistogramDataPoint) Count() uint64 { + return ms.orig.Count +} + +// SetCount replaces the count associated with this HistogramDataPoint. +func (ms HistogramDataPoint) SetCount(v uint64) { + ms.state.AssertMutable() + ms.orig.Count = v +} + +// BucketCounts returns the bucketcounts associated with this HistogramDataPoint. +func (ms HistogramDataPoint) BucketCounts() pcommon.UInt64Slice { + return pcommon.UInt64Slice(internal.NewUInt64Slice(&ms.orig.BucketCounts, ms.state)) +} + +// ExplicitBounds returns the explicitbounds associated with this HistogramDataPoint. +func (ms HistogramDataPoint) ExplicitBounds() pcommon.Float64Slice { + return pcommon.Float64Slice(internal.NewFloat64Slice(&ms.orig.ExplicitBounds, ms.state)) +} + +// Exemplars returns the Exemplars associated with this HistogramDataPoint. +func (ms HistogramDataPoint) Exemplars() ExemplarSlice { + return newExemplarSlice(&ms.orig.Exemplars, ms.state) +} + +// Flags returns the flags associated with this HistogramDataPoint. +func (ms HistogramDataPoint) Flags() DataPointFlags { + return DataPointFlags(ms.orig.Flags) +} + +// SetFlags replaces the flags associated with this HistogramDataPoint. +func (ms HistogramDataPoint) SetFlags(v DataPointFlags) { + ms.state.AssertMutable() + ms.orig.Flags = uint32(v) +} + +// Sum returns the sum associated with this HistogramDataPoint. +func (ms HistogramDataPoint) Sum() float64 { + return ms.orig.GetSum() +} + +// HasSum returns true if the HistogramDataPoint contains a +// Sum value, false otherwise. +func (ms HistogramDataPoint) HasSum() bool { + return ms.orig.Sum_ != nil +} + +// SetSum replaces the sum associated with this HistogramDataPoint. +func (ms HistogramDataPoint) SetSum(v float64) { + ms.state.AssertMutable() + ms.orig.Sum_ = &otlpmetrics.HistogramDataPoint_Sum{Sum: v} +} + +// RemoveSum removes the sum associated with this HistogramDataPoint. 
+func (ms HistogramDataPoint) RemoveSum() { + ms.state.AssertMutable() + ms.orig.Sum_ = nil +} + +// Min returns the min associated with this HistogramDataPoint. +func (ms HistogramDataPoint) Min() float64 { + return ms.orig.GetMin() +} + +// HasMin returns true if the HistogramDataPoint contains a +// Min value, false otherwise. +func (ms HistogramDataPoint) HasMin() bool { + return ms.orig.Min_ != nil +} + +// SetMin replaces the min associated with this HistogramDataPoint. +func (ms HistogramDataPoint) SetMin(v float64) { + ms.state.AssertMutable() + ms.orig.Min_ = &otlpmetrics.HistogramDataPoint_Min{Min: v} +} + +// RemoveMin removes the min associated with this HistogramDataPoint. +func (ms HistogramDataPoint) RemoveMin() { + ms.state.AssertMutable() + ms.orig.Min_ = nil +} + +// Max returns the max associated with this HistogramDataPoint. +func (ms HistogramDataPoint) Max() float64 { + return ms.orig.GetMax() +} + +// HasMax returns true if the HistogramDataPoint contains a +// Max value, false otherwise. +func (ms HistogramDataPoint) HasMax() bool { + return ms.orig.Max_ != nil +} + +// SetMax replaces the max associated with this HistogramDataPoint. +func (ms HistogramDataPoint) SetMax(v float64) { + ms.state.AssertMutable() + ms.orig.Max_ = &otlpmetrics.HistogramDataPoint_Max{Max: v} +} + +// RemoveMax removes the max associated with this HistogramDataPoint. +func (ms HistogramDataPoint) RemoveMax() { + ms.state.AssertMutable() + ms.orig.Max_ = nil +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms HistogramDataPoint) CopyTo(dest HistogramDataPoint) { + dest.state.AssertMutable() + ms.Attributes().CopyTo(dest.Attributes()) + dest.SetStartTimestamp(ms.StartTimestamp()) + dest.SetTimestamp(ms.Timestamp()) + dest.SetCount(ms.Count()) + ms.BucketCounts().CopyTo(dest.BucketCounts()) + ms.ExplicitBounds().CopyTo(dest.ExplicitBounds()) + ms.Exemplars().CopyTo(dest.Exemplars()) + dest.SetFlags(ms.Flags()) + if ms.HasSum() { + dest.SetSum(ms.Sum()) + } + + if ms.HasMin() { + dest.SetMin(ms.Min()) + } + + if ms.HasMax() { + dest.SetMax(ms.Max()) + } + +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapointslice.go new file mode 100644 index 000000000..47b02e17f --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_histogramdatapointslice.go @@ -0,0 +1,153 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetric + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" +) + +// HistogramDataPointSlice logically represents a slice of HistogramDataPoint. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewHistogramDataPointSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. 
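For an explicit-bounds histogram the usual invariant is len(BucketCounts) == len(ExplicitBounds) + 1, with the final bucket catching everything above the last bound. A sketch under that assumption; FromRaw on the pcommon slices is assumed from the sibling pcommon package, and all numbers are illustrative.

import "go.opentelemetry.io/collector/pdata/pmetric"

func fillHistogramPoint(dp pmetric.HistogramDataPoint) {
	dp.ExplicitBounds().FromRaw([]float64{0.1, 1, 10}) // 3 bounds ...
	dp.BucketCounts().FromRaw([]uint64{5, 12, 2, 1})   // ... 4 buckets, the last is the overflow bucket
	dp.SetCount(20) // 5+12+2+1
	dp.SetSum(37.8) // optional; HasSum() turns true
	dp.SetMin(0.02) // optional
	dp.SetMax(11.3) // optional
}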
+type HistogramDataPointSlice struct { + orig *[]*otlpmetrics.HistogramDataPoint + state *internal.State +} + +func newHistogramDataPointSlice(orig *[]*otlpmetrics.HistogramDataPoint, state *internal.State) HistogramDataPointSlice { + return HistogramDataPointSlice{orig: orig, state: state} +} + +// NewHistogramDataPointSlice creates a HistogramDataPointSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewHistogramDataPointSlice() HistogramDataPointSlice { + orig := []*otlpmetrics.HistogramDataPoint(nil) + state := internal.StateMutable + return newHistogramDataPointSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewHistogramDataPointSlice()". +func (es HistogramDataPointSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es HistogramDataPointSlice) At(i int) HistogramDataPoint { + return newHistogramDataPoint((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new HistogramDataPointSlice can be initialized: +// +// es := NewHistogramDataPointSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es HistogramDataPointSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpmetrics.HistogramDataPoint, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty HistogramDataPoint. +// It returns the newly added HistogramDataPoint. +func (es HistogramDataPointSlice) AppendEmpty() HistogramDataPoint { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpmetrics.HistogramDataPoint{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es HistogramDataPointSlice) MoveAndAppendTo(dest HistogramDataPointSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es HistogramDataPointSlice) RemoveIf(f func(HistogramDataPoint) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + // TODO: Prevent memory leak by erasing truncated values. + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. 
+func (es HistogramDataPointSlice) CopyTo(dest HistogramDataPointSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newHistogramDataPoint((*es.orig)[i], es.state).CopyTo(newHistogramDataPoint((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpmetrics.HistogramDataPoint, srcLen) + wrappers := make([]*otlpmetrics.HistogramDataPoint, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newHistogramDataPoint((*es.orig)[i], es.state).CopyTo(newHistogramDataPoint(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the HistogramDataPoint elements within HistogramDataPointSlice given the +// provided less function so that two instances of HistogramDataPointSlice +// can be compared. +func (es HistogramDataPointSlice) Sort(less func(a, b HistogramDataPoint) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metric.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metric.go new file mode 100644 index 000000000..839628520 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metric.go @@ -0,0 +1,249 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetric + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" +) + +// Metric represents one metric as a collection of datapoints. +// See Metric definition in OTLP: https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewMetric function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Metric struct { + orig *otlpmetrics.Metric + state *internal.State +} + +func newMetric(orig *otlpmetrics.Metric, state *internal.State) Metric { + return Metric{orig: orig, state: state} +} + +// NewMetric creates a new empty Metric. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewMetric() Metric { + state := internal.StateMutable + return newMetric(&otlpmetrics.Metric{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Metric) MoveTo(dest Metric) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpmetrics.Metric{} +} + +// Name returns the name associated with this Metric. +func (ms Metric) Name() string { + return ms.orig.Name +} + +// SetName replaces the name associated with this Metric. +func (ms Metric) SetName(v string) { + ms.state.AssertMutable() + ms.orig.Name = v +} + +// Description returns the description associated with this Metric. +func (ms Metric) Description() string { + return ms.orig.Description +} + +// SetDescription replaces the description associated with this Metric. 
+func (ms Metric) SetDescription(v string) { + ms.state.AssertMutable() + ms.orig.Description = v +} + +// Unit returns the unit associated with this Metric. +func (ms Metric) Unit() string { + return ms.orig.Unit +} + +// SetUnit replaces the unit associated with this Metric. +func (ms Metric) SetUnit(v string) { + ms.state.AssertMutable() + ms.orig.Unit = v +} + +// Type returns the type of the data for this Metric. +// Calling this function on zero-initialized Metric will cause a panic. +func (ms Metric) Type() MetricType { + switch ms.orig.Data.(type) { + case *otlpmetrics.Metric_Gauge: + return MetricTypeGauge + case *otlpmetrics.Metric_Sum: + return MetricTypeSum + case *otlpmetrics.Metric_Histogram: + return MetricTypeHistogram + case *otlpmetrics.Metric_ExponentialHistogram: + return MetricTypeExponentialHistogram + case *otlpmetrics.Metric_Summary: + return MetricTypeSummary + } + return MetricTypeEmpty +} + +// Gauge returns the gauge associated with this Metric. +// +// Calling this function when Type() != MetricTypeGauge returns an invalid +// zero-initialized instance of Gauge. Note that using such Gauge instance can cause panic. +// +// Calling this function on zero-initialized Metric will cause a panic. +func (ms Metric) Gauge() Gauge { + v, ok := ms.orig.GetData().(*otlpmetrics.Metric_Gauge) + if !ok { + return Gauge{} + } + return newGauge(v.Gauge, ms.state) +} + +// SetEmptyGauge sets an empty gauge to this Metric. +// +// After this, Type() function will return MetricTypeGauge". +// +// Calling this function on zero-initialized Metric will cause a panic. +func (ms Metric) SetEmptyGauge() Gauge { + ms.state.AssertMutable() + val := &otlpmetrics.Gauge{} + ms.orig.Data = &otlpmetrics.Metric_Gauge{Gauge: val} + return newGauge(val, ms.state) +} + +// Sum returns the sum associated with this Metric. +// +// Calling this function when Type() != MetricTypeSum returns an invalid +// zero-initialized instance of Sum. Note that using such Sum instance can cause panic. +// +// Calling this function on zero-initialized Metric will cause a panic. +func (ms Metric) Sum() Sum { + v, ok := ms.orig.GetData().(*otlpmetrics.Metric_Sum) + if !ok { + return Sum{} + } + return newSum(v.Sum, ms.state) +} + +// SetEmptySum sets an empty sum to this Metric. +// +// After this, Type() function will return MetricTypeSum". +// +// Calling this function on zero-initialized Metric will cause a panic. +func (ms Metric) SetEmptySum() Sum { + ms.state.AssertMutable() + val := &otlpmetrics.Sum{} + ms.orig.Data = &otlpmetrics.Metric_Sum{Sum: val} + return newSum(val, ms.state) +} + +// Histogram returns the histogram associated with this Metric. +// +// Calling this function when Type() != MetricTypeHistogram returns an invalid +// zero-initialized instance of Histogram. Note that using such Histogram instance can cause panic. +// +// Calling this function on zero-initialized Metric will cause a panic. +func (ms Metric) Histogram() Histogram { + v, ok := ms.orig.GetData().(*otlpmetrics.Metric_Histogram) + if !ok { + return Histogram{} + } + return newHistogram(v.Histogram, ms.state) +} + +// SetEmptyHistogram sets an empty histogram to this Metric. +// +// After this, Type() function will return MetricTypeHistogram". +// +// Calling this function on zero-initialized Metric will cause a panic. 
+func (ms Metric) SetEmptyHistogram() Histogram { + ms.state.AssertMutable() + val := &otlpmetrics.Histogram{} + ms.orig.Data = &otlpmetrics.Metric_Histogram{Histogram: val} + return newHistogram(val, ms.state) +} + +// ExponentialHistogram returns the exponentialhistogram associated with this Metric. +// +// Calling this function when Type() != MetricTypeExponentialHistogram returns an invalid +// zero-initialized instance of ExponentialHistogram. Note that using such ExponentialHistogram instance can cause panic. +// +// Calling this function on zero-initialized Metric will cause a panic. +func (ms Metric) ExponentialHistogram() ExponentialHistogram { + v, ok := ms.orig.GetData().(*otlpmetrics.Metric_ExponentialHistogram) + if !ok { + return ExponentialHistogram{} + } + return newExponentialHistogram(v.ExponentialHistogram, ms.state) +} + +// SetEmptyExponentialHistogram sets an empty exponentialhistogram to this Metric. +// +// After this, Type() function will return MetricTypeExponentialHistogram". +// +// Calling this function on zero-initialized Metric will cause a panic. +func (ms Metric) SetEmptyExponentialHistogram() ExponentialHistogram { + ms.state.AssertMutable() + val := &otlpmetrics.ExponentialHistogram{} + ms.orig.Data = &otlpmetrics.Metric_ExponentialHistogram{ExponentialHistogram: val} + return newExponentialHistogram(val, ms.state) +} + +// Summary returns the summary associated with this Metric. +// +// Calling this function when Type() != MetricTypeSummary returns an invalid +// zero-initialized instance of Summary. Note that using such Summary instance can cause panic. +// +// Calling this function on zero-initialized Metric will cause a panic. +func (ms Metric) Summary() Summary { + v, ok := ms.orig.GetData().(*otlpmetrics.Metric_Summary) + if !ok { + return Summary{} + } + return newSummary(v.Summary, ms.state) +} + +// SetEmptySummary sets an empty summary to this Metric. +// +// After this, Type() function will return MetricTypeSummary". +// +// Calling this function on zero-initialized Metric will cause a panic. +func (ms Metric) SetEmptySummary() Summary { + ms.state.AssertMutable() + val := &otlpmetrics.Summary{} + ms.orig.Data = &otlpmetrics.Metric_Summary{Summary: val} + return newSummary(val, ms.state) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms Metric) CopyTo(dest Metric) { + dest.state.AssertMutable() + dest.SetName(ms.Name()) + dest.SetDescription(ms.Description()) + dest.SetUnit(ms.Unit()) + switch ms.Type() { + case MetricTypeGauge: + ms.Gauge().CopyTo(dest.SetEmptyGauge()) + case MetricTypeSum: + ms.Sum().CopyTo(dest.SetEmptySum()) + case MetricTypeHistogram: + ms.Histogram().CopyTo(dest.SetEmptyHistogram()) + case MetricTypeExponentialHistogram: + ms.ExponentialHistogram().CopyTo(dest.SetEmptyExponentialHistogram()) + case MetricTypeSummary: + ms.Summary().CopyTo(dest.SetEmptySummary()) + } + +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metricslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metricslice.go new file mode 100644 index 000000000..30d2e0c1f --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_metricslice.go @@ -0,0 +1,153 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". 
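Metric wraps a protobuf oneof, so writers pick the variant with a SetEmptyXxx call and readers branch on Type() before touching a typed accessor. A sketch of both sides, using only the API defined in this hunk; the metric name and unit are illustrative.

import "go.opentelemetry.io/collector/pdata/pmetric"

// buildMetric shows the writer side: SetEmptyHistogram selects the oneof variant.
func buildMetric() pmetric.Metric {
	m := pmetric.NewMetric() // testing constructor; in pipelines use MetricSlice.AppendEmpty
	m.SetName("http.server.request.duration")
	m.SetUnit("s")
	h := m.SetEmptyHistogram() // Type() now reports MetricTypeHistogram
	h.SetAggregationTemporality(pmetric.AggregationTemporalityCumulative)
	h.DataPoints().AppendEmpty().SetCount(0)
	return m
}

// readMetric shows the reader side: branch on Type() before the typed accessor.
func readMetric(m pmetric.Metric) int {
	switch m.Type() {
	case pmetric.MetricTypeHistogram:
		return m.Histogram().DataPoints().Len()
	case pmetric.MetricTypeGauge:
		return m.Gauge().DataPoints().Len()
	default:
		return 0
	}
}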
+ +package pmetric + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" +) + +// MetricSlice logically represents a slice of Metric. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewMetricSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type MetricSlice struct { + orig *[]*otlpmetrics.Metric + state *internal.State +} + +func newMetricSlice(orig *[]*otlpmetrics.Metric, state *internal.State) MetricSlice { + return MetricSlice{orig: orig, state: state} +} + +// NewMetricSlice creates a MetricSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewMetricSlice() MetricSlice { + orig := []*otlpmetrics.Metric(nil) + state := internal.StateMutable + return newMetricSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewMetricSlice()". +func (es MetricSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es MetricSlice) At(i int) Metric { + return newMetric((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new MetricSlice can be initialized: +// +// es := NewMetricSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es MetricSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpmetrics.Metric, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty Metric. +// It returns the newly added Metric. +func (es MetricSlice) AppendEmpty() Metric { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpmetrics.Metric{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es MetricSlice) MoveAndAppendTo(dest MetricSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es MetricSlice) RemoveIf(f func(Metric) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + // TODO: Prevent memory leak by erasing truncated values. 
+ *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es MetricSlice) CopyTo(dest MetricSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newMetric((*es.orig)[i], es.state).CopyTo(newMetric((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpmetrics.Metric, srcLen) + wrappers := make([]*otlpmetrics.Metric, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newMetric((*es.orig)[i], es.state).CopyTo(newMetric(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the Metric elements within MetricSlice given the +// provided less function so that two instances of MetricSlice +// can be compared. +func (es MetricSlice) Sort(less func(a, b Metric) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapoint.go new file mode 100644 index 000000000..bc47c4747 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapoint.go @@ -0,0 +1,145 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetric + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// NumberDataPoint is a single data point in a timeseries that describes the time-varying value of a number metric. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewNumberDataPoint function to create new instances. +// Important: zero-initialized instance is not valid for use. +type NumberDataPoint struct { + orig *otlpmetrics.NumberDataPoint + state *internal.State +} + +func newNumberDataPoint(orig *otlpmetrics.NumberDataPoint, state *internal.State) NumberDataPoint { + return NumberDataPoint{orig: orig, state: state} +} + +// NewNumberDataPoint creates a new empty NumberDataPoint. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewNumberDataPoint() NumberDataPoint { + state := internal.StateMutable + return newNumberDataPoint(&otlpmetrics.NumberDataPoint{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms NumberDataPoint) MoveTo(dest NumberDataPoint) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpmetrics.NumberDataPoint{} +} + +// Attributes returns the Attributes associated with this NumberDataPoint. +func (ms NumberDataPoint) Attributes() pcommon.Map { + return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) +} + +// StartTimestamp returns the starttimestamp associated with this NumberDataPoint. 
+func (ms NumberDataPoint) StartTimestamp() pcommon.Timestamp { + return pcommon.Timestamp(ms.orig.StartTimeUnixNano) +} + +// SetStartTimestamp replaces the starttimestamp associated with this NumberDataPoint. +func (ms NumberDataPoint) SetStartTimestamp(v pcommon.Timestamp) { + ms.state.AssertMutable() + ms.orig.StartTimeUnixNano = uint64(v) +} + +// Timestamp returns the timestamp associated with this NumberDataPoint. +func (ms NumberDataPoint) Timestamp() pcommon.Timestamp { + return pcommon.Timestamp(ms.orig.TimeUnixNano) +} + +// SetTimestamp replaces the timestamp associated with this NumberDataPoint. +func (ms NumberDataPoint) SetTimestamp(v pcommon.Timestamp) { + ms.state.AssertMutable() + ms.orig.TimeUnixNano = uint64(v) +} + +// ValueType returns the type of the value for this NumberDataPoint. +// Calling this function on zero-initialized NumberDataPoint will cause a panic. +func (ms NumberDataPoint) ValueType() NumberDataPointValueType { + switch ms.orig.Value.(type) { + case *otlpmetrics.NumberDataPoint_AsDouble: + return NumberDataPointValueTypeDouble + case *otlpmetrics.NumberDataPoint_AsInt: + return NumberDataPointValueTypeInt + } + return NumberDataPointValueTypeEmpty +} + +// DoubleValue returns the double associated with this NumberDataPoint. +func (ms NumberDataPoint) DoubleValue() float64 { + return ms.orig.GetAsDouble() +} + +// SetDoubleValue replaces the double associated with this NumberDataPoint. +func (ms NumberDataPoint) SetDoubleValue(v float64) { + ms.state.AssertMutable() + ms.orig.Value = &otlpmetrics.NumberDataPoint_AsDouble{ + AsDouble: v, + } +} + +// IntValue returns the int associated with this NumberDataPoint. +func (ms NumberDataPoint) IntValue() int64 { + return ms.orig.GetAsInt() +} + +// SetIntValue replaces the int associated with this NumberDataPoint. +func (ms NumberDataPoint) SetIntValue(v int64) { + ms.state.AssertMutable() + ms.orig.Value = &otlpmetrics.NumberDataPoint_AsInt{ + AsInt: v, + } +} + +// Exemplars returns the Exemplars associated with this NumberDataPoint. +func (ms NumberDataPoint) Exemplars() ExemplarSlice { + return newExemplarSlice(&ms.orig.Exemplars, ms.state) +} + +// Flags returns the flags associated with this NumberDataPoint. +func (ms NumberDataPoint) Flags() DataPointFlags { + return DataPointFlags(ms.orig.Flags) +} + +// SetFlags replaces the flags associated with this NumberDataPoint. +func (ms NumberDataPoint) SetFlags(v DataPointFlags) { + ms.state.AssertMutable() + ms.orig.Flags = uint32(v) +} + +// CopyTo copies all properties from the current struct overriding the destination. 
+func (ms NumberDataPoint) CopyTo(dest NumberDataPoint) { + dest.state.AssertMutable() + ms.Attributes().CopyTo(dest.Attributes()) + dest.SetStartTimestamp(ms.StartTimestamp()) + dest.SetTimestamp(ms.Timestamp()) + switch ms.ValueType() { + case NumberDataPointValueTypeDouble: + dest.SetDoubleValue(ms.DoubleValue()) + case NumberDataPointValueTypeInt: + dest.SetIntValue(ms.IntValue()) + } + + ms.Exemplars().CopyTo(dest.Exemplars()) + dest.SetFlags(ms.Flags()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapointslice.go new file mode 100644 index 000000000..13cd71b72 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_numberdatapointslice.go @@ -0,0 +1,153 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetric + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" +) + +// NumberDataPointSlice logically represents a slice of NumberDataPoint. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewNumberDataPointSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type NumberDataPointSlice struct { + orig *[]*otlpmetrics.NumberDataPoint + state *internal.State +} + +func newNumberDataPointSlice(orig *[]*otlpmetrics.NumberDataPoint, state *internal.State) NumberDataPointSlice { + return NumberDataPointSlice{orig: orig, state: state} +} + +// NewNumberDataPointSlice creates a NumberDataPointSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewNumberDataPointSlice() NumberDataPointSlice { + orig := []*otlpmetrics.NumberDataPoint(nil) + state := internal.StateMutable + return newNumberDataPointSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewNumberDataPointSlice()". +func (es NumberDataPointSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es NumberDataPointSlice) At(i int) NumberDataPoint { + return newNumberDataPoint((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new NumberDataPointSlice can be initialized: +// +// es := NewNumberDataPointSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es NumberDataPointSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpmetrics.NumberDataPoint, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty NumberDataPoint. 
+// It returns the newly added NumberDataPoint. +func (es NumberDataPointSlice) AppendEmpty() NumberDataPoint { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpmetrics.NumberDataPoint{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es NumberDataPointSlice) MoveAndAppendTo(dest NumberDataPointSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es NumberDataPointSlice) RemoveIf(f func(NumberDataPoint) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + // TODO: Prevent memory leak by erasing truncated values. + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es NumberDataPointSlice) CopyTo(dest NumberDataPointSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newNumberDataPoint((*es.orig)[i], es.state).CopyTo(newNumberDataPoint((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpmetrics.NumberDataPoint, srcLen) + wrappers := make([]*otlpmetrics.NumberDataPoint, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newNumberDataPoint((*es.orig)[i], es.state).CopyTo(newNumberDataPoint(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the NumberDataPoint elements within NumberDataPointSlice given the +// provided less function so that two instances of NumberDataPointSlice +// can be compared. +func (es NumberDataPointSlice) Sort(less func(a, b NumberDataPoint) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetrics.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetrics.go new file mode 100644 index 000000000..43622f3f8 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetrics.go @@ -0,0 +1,76 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetric + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// ResourceMetrics is a collection of metrics from a Resource. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewResourceMetrics function to create new instances. +// Important: zero-initialized instance is not valid for use. 
+type ResourceMetrics struct { + orig *otlpmetrics.ResourceMetrics + state *internal.State +} + +func newResourceMetrics(orig *otlpmetrics.ResourceMetrics, state *internal.State) ResourceMetrics { + return ResourceMetrics{orig: orig, state: state} +} + +// NewResourceMetrics creates a new empty ResourceMetrics. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewResourceMetrics() ResourceMetrics { + state := internal.StateMutable + return newResourceMetrics(&otlpmetrics.ResourceMetrics{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ResourceMetrics) MoveTo(dest ResourceMetrics) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpmetrics.ResourceMetrics{} +} + +// Resource returns the resource associated with this ResourceMetrics. +func (ms ResourceMetrics) Resource() pcommon.Resource { + return pcommon.Resource(internal.NewResource(&ms.orig.Resource, ms.state)) +} + +// SchemaUrl returns the schemaurl associated with this ResourceMetrics. +func (ms ResourceMetrics) SchemaUrl() string { + return ms.orig.SchemaUrl +} + +// SetSchemaUrl replaces the schemaurl associated with this ResourceMetrics. +func (ms ResourceMetrics) SetSchemaUrl(v string) { + ms.state.AssertMutable() + ms.orig.SchemaUrl = v +} + +// ScopeMetrics returns the ScopeMetrics associated with this ResourceMetrics. +func (ms ResourceMetrics) ScopeMetrics() ScopeMetricsSlice { + return newScopeMetricsSlice(&ms.orig.ScopeMetrics, ms.state) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ResourceMetrics) CopyTo(dest ResourceMetrics) { + dest.state.AssertMutable() + ms.Resource().CopyTo(dest.Resource()) + dest.SetSchemaUrl(ms.SchemaUrl()) + ms.ScopeMetrics().CopyTo(dest.ScopeMetrics()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetricsslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetricsslice.go new file mode 100644 index 000000000..b25fdc721 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_resourcemetricsslice.go @@ -0,0 +1,153 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetric + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" +) + +// ResourceMetricsSlice logically represents a slice of ResourceMetrics. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewResourceMetricsSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ResourceMetricsSlice struct { + orig *[]*otlpmetrics.ResourceMetrics + state *internal.State +} + +func newResourceMetricsSlice(orig *[]*otlpmetrics.ResourceMetrics, state *internal.State) ResourceMetricsSlice { + return ResourceMetricsSlice{orig: orig, state: state} +} + +// NewResourceMetricsSlice creates a ResourceMetricsSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. 
+func NewResourceMetricsSlice() ResourceMetricsSlice { + orig := []*otlpmetrics.ResourceMetrics(nil) + state := internal.StateMutable + return newResourceMetricsSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewResourceMetricsSlice()". +func (es ResourceMetricsSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es ResourceMetricsSlice) At(i int) ResourceMetrics { + return newResourceMetrics((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new ResourceMetricsSlice can be initialized: +// +// es := NewResourceMetricsSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es ResourceMetricsSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpmetrics.ResourceMetrics, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty ResourceMetrics. +// It returns the newly added ResourceMetrics. +func (es ResourceMetricsSlice) AppendEmpty() ResourceMetrics { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpmetrics.ResourceMetrics{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ResourceMetricsSlice) MoveAndAppendTo(dest ResourceMetricsSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es ResourceMetricsSlice) RemoveIf(f func(ResourceMetrics) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + // TODO: Prevent memory leak by erasing truncated values. + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. 
+func (es ResourceMetricsSlice) CopyTo(dest ResourceMetricsSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newResourceMetrics((*es.orig)[i], es.state).CopyTo(newResourceMetrics((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpmetrics.ResourceMetrics, srcLen) + wrappers := make([]*otlpmetrics.ResourceMetrics, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newResourceMetrics((*es.orig)[i], es.state).CopyTo(newResourceMetrics(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the ResourceMetrics elements within ResourceMetricsSlice given the +// provided less function so that two instances of ResourceMetricsSlice +// can be compared. +func (es ResourceMetricsSlice) Sort(less func(a, b ResourceMetrics) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetrics.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetrics.go new file mode 100644 index 000000000..1151c36da --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetrics.go @@ -0,0 +1,76 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetric + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// ScopeMetrics is a collection of metrics from a LibraryInstrumentation. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewScopeMetrics function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ScopeMetrics struct { + orig *otlpmetrics.ScopeMetrics + state *internal.State +} + +func newScopeMetrics(orig *otlpmetrics.ScopeMetrics, state *internal.State) ScopeMetrics { + return ScopeMetrics{orig: orig, state: state} +} + +// NewScopeMetrics creates a new empty ScopeMetrics. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewScopeMetrics() ScopeMetrics { + state := internal.StateMutable + return newScopeMetrics(&otlpmetrics.ScopeMetrics{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ScopeMetrics) MoveTo(dest ScopeMetrics) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpmetrics.ScopeMetrics{} +} + +// Scope returns the scope associated with this ScopeMetrics. +func (ms ScopeMetrics) Scope() pcommon.InstrumentationScope { + return pcommon.InstrumentationScope(internal.NewInstrumentationScope(&ms.orig.Scope, ms.state)) +} + +// SchemaUrl returns the schemaurl associated with this ScopeMetrics. +func (ms ScopeMetrics) SchemaUrl() string { + return ms.orig.SchemaUrl +} + +// SetSchemaUrl replaces the schemaurl associated with this ScopeMetrics. 
+func (ms ScopeMetrics) SetSchemaUrl(v string) { + ms.state.AssertMutable() + ms.orig.SchemaUrl = v +} + +// Metrics returns the Metrics associated with this ScopeMetrics. +func (ms ScopeMetrics) Metrics() MetricSlice { + return newMetricSlice(&ms.orig.Metrics, ms.state) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ScopeMetrics) CopyTo(dest ScopeMetrics) { + dest.state.AssertMutable() + ms.Scope().CopyTo(dest.Scope()) + dest.SetSchemaUrl(ms.SchemaUrl()) + ms.Metrics().CopyTo(dest.Metrics()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetricsslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetricsslice.go new file mode 100644 index 000000000..d2bbe6108 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_scopemetricsslice.go @@ -0,0 +1,153 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetric + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" +) + +// ScopeMetricsSlice logically represents a slice of ScopeMetrics. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewScopeMetricsSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ScopeMetricsSlice struct { + orig *[]*otlpmetrics.ScopeMetrics + state *internal.State +} + +func newScopeMetricsSlice(orig *[]*otlpmetrics.ScopeMetrics, state *internal.State) ScopeMetricsSlice { + return ScopeMetricsSlice{orig: orig, state: state} +} + +// NewScopeMetricsSlice creates a ScopeMetricsSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewScopeMetricsSlice() ScopeMetricsSlice { + orig := []*otlpmetrics.ScopeMetrics(nil) + state := internal.StateMutable + return newScopeMetricsSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewScopeMetricsSlice()". +func (es ScopeMetricsSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es ScopeMetricsSlice) At(i int) ScopeMetrics { + return newScopeMetrics((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new ScopeMetricsSlice can be initialized: +// +// es := NewScopeMetricsSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es ScopeMetricsSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpmetrics.ScopeMetrics, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty ScopeMetrics. 
+// It returns the newly added ScopeMetrics. +func (es ScopeMetricsSlice) AppendEmpty() ScopeMetrics { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpmetrics.ScopeMetrics{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es ScopeMetricsSlice) MoveAndAppendTo(dest ScopeMetricsSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es ScopeMetricsSlice) RemoveIf(f func(ScopeMetrics) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + // TODO: Prevent memory leak by erasing truncated values. + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. +func (es ScopeMetricsSlice) CopyTo(dest ScopeMetricsSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newScopeMetrics((*es.orig)[i], es.state).CopyTo(newScopeMetrics((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpmetrics.ScopeMetrics, srcLen) + wrappers := make([]*otlpmetrics.ScopeMetrics, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newScopeMetrics((*es.orig)[i], es.state).CopyTo(newScopeMetrics(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the ScopeMetrics elements within ScopeMetricsSlice given the +// provided less function so that two instances of ScopeMetricsSlice +// can be compared. +func (es ScopeMetricsSlice) Sort(less func(a, b ScopeMetrics) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_sum.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_sum.go new file mode 100644 index 000000000..7def0749a --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_sum.go @@ -0,0 +1,81 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetric + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" +) + +// Sum represents the type of a numeric metric that is calculated as a sum of all reported measurements over a time interval. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewSum function to create new instances. +// Important: zero-initialized instance is not valid for use. 
+type Sum struct { + orig *otlpmetrics.Sum + state *internal.State +} + +func newSum(orig *otlpmetrics.Sum, state *internal.State) Sum { + return Sum{orig: orig, state: state} +} + +// NewSum creates a new empty Sum. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewSum() Sum { + state := internal.StateMutable + return newSum(&otlpmetrics.Sum{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Sum) MoveTo(dest Sum) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpmetrics.Sum{} +} + +// AggregationTemporality returns the aggregationtemporality associated with this Sum. +func (ms Sum) AggregationTemporality() AggregationTemporality { + return AggregationTemporality(ms.orig.AggregationTemporality) +} + +// SetAggregationTemporality replaces the aggregationtemporality associated with this Sum. +func (ms Sum) SetAggregationTemporality(v AggregationTemporality) { + ms.state.AssertMutable() + ms.orig.AggregationTemporality = otlpmetrics.AggregationTemporality(v) +} + +// IsMonotonic returns the ismonotonic associated with this Sum. +func (ms Sum) IsMonotonic() bool { + return ms.orig.IsMonotonic +} + +// SetIsMonotonic replaces the ismonotonic associated with this Sum. +func (ms Sum) SetIsMonotonic(v bool) { + ms.state.AssertMutable() + ms.orig.IsMonotonic = v +} + +// DataPoints returns the DataPoints associated with this Sum. +func (ms Sum) DataPoints() NumberDataPointSlice { + return newNumberDataPointSlice(&ms.orig.DataPoints, ms.state) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms Sum) CopyTo(dest Sum) { + dest.state.AssertMutable() + dest.SetAggregationTemporality(ms.AggregationTemporality()) + dest.SetIsMonotonic(ms.IsMonotonic()) + ms.DataPoints().CopyTo(dest.DataPoints()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summary.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summary.go new file mode 100644 index 000000000..64fbffbef --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summary.go @@ -0,0 +1,57 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetric + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" +) + +// Summary represents the type of a metric that is calculated by aggregating as a Summary of all reported double measurements over a time interval. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewSummary function to create new instances. +// Important: zero-initialized instance is not valid for use. +type Summary struct { + orig *otlpmetrics.Summary + state *internal.State +} + +func newSummary(orig *otlpmetrics.Summary, state *internal.State) Summary { + return Summary{orig: orig, state: state} +} + +// NewSummary creates a new empty Summary. +// +// This must be used only in testing code. 
Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewSummary() Summary { + state := internal.StateMutable + return newSummary(&otlpmetrics.Summary{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms Summary) MoveTo(dest Summary) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpmetrics.Summary{} +} + +// DataPoints returns the DataPoints associated with this Summary. +func (ms Summary) DataPoints() SummaryDataPointSlice { + return newSummaryDataPointSlice(&ms.orig.DataPoints, ms.state) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms Summary) CopyTo(dest Summary) { + dest.state.AssertMutable() + ms.DataPoints().CopyTo(dest.DataPoints()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapoint.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapoint.go new file mode 100644 index 000000000..0f0f6dd1e --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapoint.go @@ -0,0 +1,124 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetric + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" + "go.opentelemetry.io/collector/pdata/pcommon" +) + +// SummaryDataPoint is a single data point in a timeseries that describes the time-varying values of a Summary of double values. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewSummaryDataPoint function to create new instances. +// Important: zero-initialized instance is not valid for use. +type SummaryDataPoint struct { + orig *otlpmetrics.SummaryDataPoint + state *internal.State +} + +func newSummaryDataPoint(orig *otlpmetrics.SummaryDataPoint, state *internal.State) SummaryDataPoint { + return SummaryDataPoint{orig: orig, state: state} +} + +// NewSummaryDataPoint creates a new empty SummaryDataPoint. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewSummaryDataPoint() SummaryDataPoint { + state := internal.StateMutable + return newSummaryDataPoint(&otlpmetrics.SummaryDataPoint{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms SummaryDataPoint) MoveTo(dest SummaryDataPoint) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpmetrics.SummaryDataPoint{} +} + +// Attributes returns the Attributes associated with this SummaryDataPoint. +func (ms SummaryDataPoint) Attributes() pcommon.Map { + return pcommon.Map(internal.NewMap(&ms.orig.Attributes, ms.state)) +} + +// StartTimestamp returns the starttimestamp associated with this SummaryDataPoint. 
+func (ms SummaryDataPoint) StartTimestamp() pcommon.Timestamp { + return pcommon.Timestamp(ms.orig.StartTimeUnixNano) +} + +// SetStartTimestamp replaces the starttimestamp associated with this SummaryDataPoint. +func (ms SummaryDataPoint) SetStartTimestamp(v pcommon.Timestamp) { + ms.state.AssertMutable() + ms.orig.StartTimeUnixNano = uint64(v) +} + +// Timestamp returns the timestamp associated with this SummaryDataPoint. +func (ms SummaryDataPoint) Timestamp() pcommon.Timestamp { + return pcommon.Timestamp(ms.orig.TimeUnixNano) +} + +// SetTimestamp replaces the timestamp associated with this SummaryDataPoint. +func (ms SummaryDataPoint) SetTimestamp(v pcommon.Timestamp) { + ms.state.AssertMutable() + ms.orig.TimeUnixNano = uint64(v) +} + +// Count returns the count associated with this SummaryDataPoint. +func (ms SummaryDataPoint) Count() uint64 { + return ms.orig.Count +} + +// SetCount replaces the count associated with this SummaryDataPoint. +func (ms SummaryDataPoint) SetCount(v uint64) { + ms.state.AssertMutable() + ms.orig.Count = v +} + +// Sum returns the sum associated with this SummaryDataPoint. +func (ms SummaryDataPoint) Sum() float64 { + return ms.orig.Sum +} + +// SetSum replaces the sum associated with this SummaryDataPoint. +func (ms SummaryDataPoint) SetSum(v float64) { + ms.state.AssertMutable() + ms.orig.Sum = v +} + +// QuantileValues returns the QuantileValues associated with this SummaryDataPoint. +func (ms SummaryDataPoint) QuantileValues() SummaryDataPointValueAtQuantileSlice { + return newSummaryDataPointValueAtQuantileSlice(&ms.orig.QuantileValues, ms.state) +} + +// Flags returns the flags associated with this SummaryDataPoint. +func (ms SummaryDataPoint) Flags() DataPointFlags { + return DataPointFlags(ms.orig.Flags) +} + +// SetFlags replaces the flags associated with this SummaryDataPoint. +func (ms SummaryDataPoint) SetFlags(v DataPointFlags) { + ms.state.AssertMutable() + ms.orig.Flags = uint32(v) +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms SummaryDataPoint) CopyTo(dest SummaryDataPoint) { + dest.state.AssertMutable() + ms.Attributes().CopyTo(dest.Attributes()) + dest.SetStartTimestamp(ms.StartTimestamp()) + dest.SetTimestamp(ms.Timestamp()) + dest.SetCount(ms.Count()) + dest.SetSum(ms.Sum()) + ms.QuantileValues().CopyTo(dest.QuantileValues()) + dest.SetFlags(ms.Flags()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointslice.go new file mode 100644 index 000000000..e8aa51de0 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointslice.go @@ -0,0 +1,153 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetric + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" +) + +// SummaryDataPointSlice logically represents a slice of SummaryDataPoint. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewSummaryDataPointSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. 
+type SummaryDataPointSlice struct { + orig *[]*otlpmetrics.SummaryDataPoint + state *internal.State +} + +func newSummaryDataPointSlice(orig *[]*otlpmetrics.SummaryDataPoint, state *internal.State) SummaryDataPointSlice { + return SummaryDataPointSlice{orig: orig, state: state} +} + +// NewSummaryDataPointSlice creates a SummaryDataPointSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewSummaryDataPointSlice() SummaryDataPointSlice { + orig := []*otlpmetrics.SummaryDataPoint(nil) + state := internal.StateMutable + return newSummaryDataPointSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewSummaryDataPointSlice()". +func (es SummaryDataPointSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... // Do something with the element +// } +func (es SummaryDataPointSlice) At(i int) SummaryDataPoint { + return newSummaryDataPoint((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new SummaryDataPointSlice can be initialized: +// +// es := NewSummaryDataPointSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es SummaryDataPointSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpmetrics.SummaryDataPoint, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty SummaryDataPoint. +// It returns the newly added SummaryDataPoint. +func (es SummaryDataPointSlice) AppendEmpty() SummaryDataPoint { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpmetrics.SummaryDataPoint{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es SummaryDataPointSlice) MoveAndAppendTo(dest SummaryDataPointSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es SummaryDataPointSlice) RemoveIf(f func(SummaryDataPoint) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + // TODO: Prevent memory leak by erasing truncated values. + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. 
+func (es SummaryDataPointSlice) CopyTo(dest SummaryDataPointSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newSummaryDataPoint((*es.orig)[i], es.state).CopyTo(newSummaryDataPoint((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpmetrics.SummaryDataPoint, srcLen) + wrappers := make([]*otlpmetrics.SummaryDataPoint, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newSummaryDataPoint((*es.orig)[i], es.state).CopyTo(newSummaryDataPoint(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the SummaryDataPoint elements within SummaryDataPointSlice given the +// provided less function so that two instances of SummaryDataPointSlice +// can be compared. +func (es SummaryDataPointSlice) Sort(less func(a, b SummaryDataPoint) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantile.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantile.go new file mode 100644 index 000000000..b4e1fe08f --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantile.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetric + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" +) + +// SummaryDataPointValueAtQuantile is a quantile value within a Summary data point. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewSummaryDataPointValueAtQuantile function to create new instances. +// Important: zero-initialized instance is not valid for use. +type SummaryDataPointValueAtQuantile struct { + orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile + state *internal.State +} + +func newSummaryDataPointValueAtQuantile(orig *otlpmetrics.SummaryDataPoint_ValueAtQuantile, state *internal.State) SummaryDataPointValueAtQuantile { + return SummaryDataPointValueAtQuantile{orig: orig, state: state} +} + +// NewSummaryDataPointValueAtQuantile creates a new empty SummaryDataPointValueAtQuantile. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. +func NewSummaryDataPointValueAtQuantile() SummaryDataPointValueAtQuantile { + state := internal.StateMutable + return newSummaryDataPointValueAtQuantile(&otlpmetrics.SummaryDataPoint_ValueAtQuantile{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms SummaryDataPointValueAtQuantile) MoveTo(dest SummaryDataPointValueAtQuantile) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpmetrics.SummaryDataPoint_ValueAtQuantile{} +} + +// Quantile returns the quantile associated with this SummaryDataPointValueAtQuantile. 
+func (ms SummaryDataPointValueAtQuantile) Quantile() float64 { + return ms.orig.Quantile +} + +// SetQuantile replaces the quantile associated with this SummaryDataPointValueAtQuantile. +func (ms SummaryDataPointValueAtQuantile) SetQuantile(v float64) { + ms.state.AssertMutable() + ms.orig.Quantile = v +} + +// Value returns the value associated with this SummaryDataPointValueAtQuantile. +func (ms SummaryDataPointValueAtQuantile) Value() float64 { + return ms.orig.Value +} + +// SetValue replaces the value associated with this SummaryDataPointValueAtQuantile. +func (ms SummaryDataPointValueAtQuantile) SetValue(v float64) { + ms.state.AssertMutable() + ms.orig.Value = v +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms SummaryDataPointValueAtQuantile) CopyTo(dest SummaryDataPointValueAtQuantile) { + dest.state.AssertMutable() + dest.SetQuantile(ms.Quantile()) + dest.SetValue(ms.Value()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantileslice.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantileslice.go new file mode 100644 index 000000000..21343f8ce --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/generated_summarydatapointvalueatquantileslice.go @@ -0,0 +1,153 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetric + +import ( + "sort" + + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" +) + +// SummaryDataPointValueAtQuantileSlice logically represents a slice of SummaryDataPointValueAtQuantile. +// +// This is a reference type. If passed by value and callee modifies it, the +// caller will see the modification. +// +// Must use NewSummaryDataPointValueAtQuantileSlice function to create new instances. +// Important: zero-initialized instance is not valid for use. +type SummaryDataPointValueAtQuantileSlice struct { + orig *[]*otlpmetrics.SummaryDataPoint_ValueAtQuantile + state *internal.State +} + +func newSummaryDataPointValueAtQuantileSlice(orig *[]*otlpmetrics.SummaryDataPoint_ValueAtQuantile, state *internal.State) SummaryDataPointValueAtQuantileSlice { + return SummaryDataPointValueAtQuantileSlice{orig: orig, state: state} +} + +// NewSummaryDataPointValueAtQuantileSlice creates a SummaryDataPointValueAtQuantileSlice with 0 elements. +// Can use "EnsureCapacity" to initialize with a given capacity. +func NewSummaryDataPointValueAtQuantileSlice() SummaryDataPointValueAtQuantileSlice { + orig := []*otlpmetrics.SummaryDataPoint_ValueAtQuantile(nil) + state := internal.StateMutable + return newSummaryDataPointValueAtQuantileSlice(&orig, &state) +} + +// Len returns the number of elements in the slice. +// +// Returns "0" for a newly instance created with "NewSummaryDataPointValueAtQuantileSlice()". +func (es SummaryDataPointValueAtQuantileSlice) Len() int { + return len(*es.orig) +} + +// At returns the element at the given index. +// +// This function is used mostly for iterating over all the values in the slice: +// +// for i := 0; i < es.Len(); i++ { +// e := es.At(i) +// ... 
// Do something with the element +// } +func (es SummaryDataPointValueAtQuantileSlice) At(i int) SummaryDataPointValueAtQuantile { + return newSummaryDataPointValueAtQuantile((*es.orig)[i], es.state) +} + +// EnsureCapacity is an operation that ensures the slice has at least the specified capacity. +// 1. If the newCap <= cap then no change in capacity. +// 2. If the newCap > cap then the slice capacity will be expanded to equal newCap. +// +// Here is how a new SummaryDataPointValueAtQuantileSlice can be initialized: +// +// es := NewSummaryDataPointValueAtQuantileSlice() +// es.EnsureCapacity(4) +// for i := 0; i < 4; i++ { +// e := es.AppendEmpty() +// // Here should set all the values for e. +// } +func (es SummaryDataPointValueAtQuantileSlice) EnsureCapacity(newCap int) { + es.state.AssertMutable() + oldCap := cap(*es.orig) + if newCap <= oldCap { + return + } + + newOrig := make([]*otlpmetrics.SummaryDataPoint_ValueAtQuantile, len(*es.orig), newCap) + copy(newOrig, *es.orig) + *es.orig = newOrig +} + +// AppendEmpty will append to the end of the slice an empty SummaryDataPointValueAtQuantile. +// It returns the newly added SummaryDataPointValueAtQuantile. +func (es SummaryDataPointValueAtQuantileSlice) AppendEmpty() SummaryDataPointValueAtQuantile { + es.state.AssertMutable() + *es.orig = append(*es.orig, &otlpmetrics.SummaryDataPoint_ValueAtQuantile{}) + return es.At(es.Len() - 1) +} + +// MoveAndAppendTo moves all elements from the current slice and appends them to the dest. +// The current slice will be cleared. +func (es SummaryDataPointValueAtQuantileSlice) MoveAndAppendTo(dest SummaryDataPointValueAtQuantileSlice) { + es.state.AssertMutable() + dest.state.AssertMutable() + if *dest.orig == nil { + // We can simply move the entire vector and avoid any allocations. + *dest.orig = *es.orig + } else { + *dest.orig = append(*dest.orig, *es.orig...) + } + *es.orig = nil +} + +// RemoveIf calls f sequentially for each element present in the slice. +// If f returns true, the element is removed from the slice. +func (es SummaryDataPointValueAtQuantileSlice) RemoveIf(f func(SummaryDataPointValueAtQuantile) bool) { + es.state.AssertMutable() + newLen := 0 + for i := 0; i < len(*es.orig); i++ { + if f(es.At(i)) { + continue + } + if newLen == i { + // Nothing to move, element is at the right place. + newLen++ + continue + } + (*es.orig)[newLen] = (*es.orig)[i] + newLen++ + } + // TODO: Prevent memory leak by erasing truncated values. + *es.orig = (*es.orig)[:newLen] +} + +// CopyTo copies all elements from the current slice overriding the destination. 
+func (es SummaryDataPointValueAtQuantileSlice) CopyTo(dest SummaryDataPointValueAtQuantileSlice) { + dest.state.AssertMutable() + srcLen := es.Len() + destCap := cap(*dest.orig) + if srcLen <= destCap { + (*dest.orig) = (*dest.orig)[:srcLen:destCap] + for i := range *es.orig { + newSummaryDataPointValueAtQuantile((*es.orig)[i], es.state).CopyTo(newSummaryDataPointValueAtQuantile((*dest.orig)[i], dest.state)) + } + return + } + origs := make([]otlpmetrics.SummaryDataPoint_ValueAtQuantile, srcLen) + wrappers := make([]*otlpmetrics.SummaryDataPoint_ValueAtQuantile, srcLen) + for i := range *es.orig { + wrappers[i] = &origs[i] + newSummaryDataPointValueAtQuantile((*es.orig)[i], es.state).CopyTo(newSummaryDataPointValueAtQuantile(wrappers[i], dest.state)) + } + *dest.orig = wrappers +} + +// Sort sorts the SummaryDataPointValueAtQuantile elements within SummaryDataPointValueAtQuantileSlice given the +// provided less function so that two instances of SummaryDataPointValueAtQuantileSlice +// can be compared. +func (es SummaryDataPointValueAtQuantileSlice) Sort(less func(a, b SummaryDataPointValueAtQuantile) bool) { + es.state.AssertMutable() + sort.SliceStable(*es.orig, func(i, j int) bool { return less(es.At(i), es.At(j)) }) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/json.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/json.go new file mode 100644 index 000000000..3df234c20 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/json.go @@ -0,0 +1,431 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric" + +import ( + "bytes" + "fmt" + + jsoniter "github.com/json-iterator/go" + + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/internal/otlp" +) + +var _ Marshaler = (*JSONMarshaler)(nil) + +type JSONMarshaler struct{} + +func (*JSONMarshaler) MarshalMetrics(md Metrics) ([]byte, error) { + buf := bytes.Buffer{} + pb := internal.MetricsToProto(internal.Metrics(md)) + err := json.Marshal(&buf, &pb) + return buf.Bytes(), err +} + +type JSONUnmarshaler struct{} + +func (*JSONUnmarshaler) UnmarshalMetrics(buf []byte) (Metrics, error) { + iter := jsoniter.ConfigFastest.BorrowIterator(buf) + defer jsoniter.ConfigFastest.ReturnIterator(iter) + md := NewMetrics() + md.unmarshalJsoniter(iter) + if iter.Error != nil { + return Metrics{}, iter.Error + } + otlp.MigrateMetrics(md.getOrig().ResourceMetrics) + return md, nil +} + +func (ms Metrics) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "resource_metrics", "resourceMetrics": + iter.ReadArrayCB(func(iterator *jsoniter.Iterator) bool { + ms.ResourceMetrics().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + default: + iter.Skip() + } + return true + }) +} + +func (ms ResourceMetrics) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "resource": + json.ReadResource(iter, &ms.orig.Resource) + case "scopeMetrics", "scope_metrics": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.ScopeMetrics().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "schemaUrl", "schema_url": + ms.orig.SchemaUrl = iter.ReadString() + default: + iter.Skip() + } 
+ return true + }) +} + +func (ms ScopeMetrics) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "scope": + json.ReadScope(iter, &ms.orig.Scope) + case "metrics": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.Metrics().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "schemaUrl", "schema_url": + ms.orig.SchemaUrl = iter.ReadString() + default: + iter.Skip() + } + return true + }) +} + +func (ms Metric) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "name": + ms.orig.Name = iter.ReadString() + case "description": + ms.orig.Description = iter.ReadString() + case "unit": + ms.orig.Unit = iter.ReadString() + case "sum": + ms.SetEmptySum().unmarshalJsoniter(iter) + case "gauge": + ms.SetEmptyGauge().unmarshalJsoniter(iter) + case "histogram": + ms.SetEmptyHistogram().unmarshalJsoniter(iter) + case "exponential_histogram", "exponentialHistogram": + ms.SetEmptyExponentialHistogram().unmarshalJsoniter(iter) + case "summary": + ms.SetEmptySummary().unmarshalJsoniter(iter) + default: + iter.Skip() + } + return true + }) +} + +func (ms Sum) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "aggregation_temporality", "aggregationTemporality": + ms.orig.AggregationTemporality = readAggregationTemporality(iter) + case "is_monotonic", "isMonotonic": + ms.orig.IsMonotonic = iter.ReadBool() + case "data_points", "dataPoints": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.DataPoints().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + default: + iter.Skip() + } + return true + }) +} + +func (ms Gauge) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "data_points", "dataPoints": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.DataPoints().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + default: + iter.Skip() + } + return true + }) +} + +func (ms Histogram) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "data_points", "dataPoints": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.DataPoints().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "aggregation_temporality", "aggregationTemporality": + ms.orig.AggregationTemporality = readAggregationTemporality(iter) + default: + iter.Skip() + } + return true + }) +} + +func (ms ExponentialHistogram) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "data_points", "dataPoints": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.DataPoints().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "aggregation_temporality", "aggregationTemporality": + ms.orig.AggregationTemporality = readAggregationTemporality(iter) + default: + iter.Skip() + } + return true + }) +} + +func (ms Summary) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "data_points", "dataPoints": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.DataPoints().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + default: + iter.Skip() + } + return true + }) +} + +func (ms NumberDataPoint) 
unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "timeUnixNano", "time_unix_nano": + ms.orig.TimeUnixNano = json.ReadUint64(iter) + case "start_time_unix_nano", "startTimeUnixNano": + ms.orig.StartTimeUnixNano = json.ReadUint64(iter) + case "as_int", "asInt": + ms.orig.Value = &otlpmetrics.NumberDataPoint_AsInt{ + AsInt: json.ReadInt64(iter), + } + case "as_double", "asDouble": + ms.orig.Value = &otlpmetrics.NumberDataPoint_AsDouble{ + AsDouble: json.ReadFloat64(iter), + } + case "attributes": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.orig.Attributes = append(ms.orig.Attributes, json.ReadAttribute(iter)) + return true + }) + case "exemplars": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.Exemplars().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "flags": + ms.orig.Flags = json.ReadUint32(iter) + default: + iter.Skip() + } + return true + }) +} + +func (ms HistogramDataPoint) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "timeUnixNano", "time_unix_nano": + ms.orig.TimeUnixNano = json.ReadUint64(iter) + case "start_time_unix_nano", "startTimeUnixNano": + ms.orig.StartTimeUnixNano = json.ReadUint64(iter) + case "attributes": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.orig.Attributes = append(ms.orig.Attributes, json.ReadAttribute(iter)) + return true + }) + case "count": + ms.orig.Count = json.ReadUint64(iter) + case "sum": + ms.orig.Sum_ = &otlpmetrics.HistogramDataPoint_Sum{Sum: json.ReadFloat64(iter)} + case "bucket_counts", "bucketCounts": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.orig.BucketCounts = append(ms.orig.BucketCounts, json.ReadUint64(iter)) + return true + }) + case "explicit_bounds", "explicitBounds": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.orig.ExplicitBounds = append(ms.orig.ExplicitBounds, json.ReadFloat64(iter)) + return true + }) + case "exemplars": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.Exemplars().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "flags": + ms.orig.Flags = json.ReadUint32(iter) + case "max": + ms.orig.Max_ = &otlpmetrics.HistogramDataPoint_Max{ + Max: json.ReadFloat64(iter), + } + case "min": + ms.orig.Min_ = &otlpmetrics.HistogramDataPoint_Min{ + Min: json.ReadFloat64(iter), + } + default: + iter.Skip() + } + return true + }) +} + +func (ms ExponentialHistogramDataPoint) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "timeUnixNano", "time_unix_nano": + ms.orig.TimeUnixNano = json.ReadUint64(iter) + case "start_time_unix_nano", "startTimeUnixNano": + ms.orig.StartTimeUnixNano = json.ReadUint64(iter) + case "attributes": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.orig.Attributes = append(ms.orig.Attributes, json.ReadAttribute(iter)) + return true + }) + case "count": + ms.orig.Count = json.ReadUint64(iter) + case "sum": + ms.orig.Sum_ = &otlpmetrics.ExponentialHistogramDataPoint_Sum{ + Sum: json.ReadFloat64(iter), + } + case "scale": + ms.orig.Scale = iter.ReadInt32() + case "zero_count", "zeroCount": + ms.orig.ZeroCount = json.ReadUint64(iter) + case "positive": + ms.Positive().unmarshalJsoniter(iter) + case "negative": + ms.Negative().unmarshalJsoniter(iter) + case "exemplars": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + 
ms.Exemplars().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "flags": + ms.orig.Flags = json.ReadUint32(iter) + case "max": + ms.orig.Max_ = &otlpmetrics.ExponentialHistogramDataPoint_Max{ + Max: json.ReadFloat64(iter), + } + case "min": + ms.orig.Min_ = &otlpmetrics.ExponentialHistogramDataPoint_Min{ + Min: json.ReadFloat64(iter), + } + default: + iter.Skip() + } + return true + }) +} + +func (ms SummaryDataPoint) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "timeUnixNano", "time_unix_nano": + ms.orig.TimeUnixNano = json.ReadUint64(iter) + case "start_time_unix_nano", "startTimeUnixNano": + ms.orig.StartTimeUnixNano = json.ReadUint64(iter) + case "attributes": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.orig.Attributes = append(ms.orig.Attributes, json.ReadAttribute(iter)) + return true + }) + case "count": + ms.orig.Count = json.ReadUint64(iter) + case "sum": + ms.orig.Sum = json.ReadFloat64(iter) + case "quantile_values", "quantileValues": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.QuantileValues().AppendEmpty().unmarshalJsoniter(iter) + return true + }) + case "flags": + ms.orig.Flags = json.ReadUint32(iter) + default: + iter.Skip() + } + return true + }) +} + +func (ms ExponentialHistogramDataPointBuckets) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "bucket_counts", "bucketCounts": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.orig.BucketCounts = append(ms.orig.BucketCounts, json.ReadUint64(iter)) + return true + }) + case "offset": + ms.orig.Offset = iter.ReadInt32() + default: + iter.Skip() + } + return true + }) +} + +func (ms SummaryDataPointValueAtQuantile) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "quantile": + ms.orig.Quantile = json.ReadFloat64(iter) + case "value": + ms.orig.Value = json.ReadFloat64(iter) + default: + iter.Skip() + } + return true + }) +} + +func (ms Exemplar) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "filtered_attributes", "filteredAttributes": + iter.ReadArrayCB(func(iter *jsoniter.Iterator) bool { + ms.orig.FilteredAttributes = append(ms.orig.FilteredAttributes, json.ReadAttribute(iter)) + return true + }) + case "timeUnixNano", "time_unix_nano": + ms.orig.TimeUnixNano = json.ReadUint64(iter) + case "as_int", "asInt": + ms.orig.Value = &otlpmetrics.Exemplar_AsInt{ + AsInt: json.ReadInt64(iter), + } + case "as_double", "asDouble": + ms.orig.Value = &otlpmetrics.Exemplar_AsDouble{ + AsDouble: json.ReadFloat64(iter), + } + case "traceId", "trace_id": + if err := ms.orig.TraceId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { + iter.ReportError("exemplar.traceId", fmt.Sprintf("parse trace_id:%v", err)) + } + case "spanId", "span_id": + if err := ms.orig.SpanId.UnmarshalJSON([]byte(iter.ReadString())); err != nil { + iter.ReportError("exemplar.spanId", fmt.Sprintf("parse span_id:%v", err)) + } + default: + iter.Skip() + } + return true + }) +} + +func readAggregationTemporality(iter *jsoniter.Iterator) otlpmetrics.AggregationTemporality { + return otlpmetrics.AggregationTemporality(json.ReadEnumValue(iter, otlpmetrics.AggregationTemporality_value)) +} diff --git 
a/vendor/go.opentelemetry.io/collector/pdata/pmetric/metric_data_point_flags.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/metric_data_point_flags.go new file mode 100644 index 000000000..bc385fa2b --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/metric_data_point_flags.go @@ -0,0 +1,28 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric" + +const noRecordValueMask = uint32(1) + +var DefaultDataPointFlags = DataPointFlags(0) + +// DataPointFlags defines how a metric aggregator reports aggregated values. +// It describes how those values relate to the time interval over which they are aggregated. +type DataPointFlags uint32 + +// NoRecordedValue returns true if the DataPointFlags contains the NoRecordedValue flag. +func (ms DataPointFlags) NoRecordedValue() bool { + return uint32(ms)&noRecordValueMask != 0 +} + +// WithNoRecordedValue returns a new DataPointFlags, with the NoRecordedValue flag set to the given value. +func (ms DataPointFlags) WithNoRecordedValue(b bool) DataPointFlags { + orig := uint32(ms) + if b { + orig |= noRecordValueMask + } else { + orig &^= noRecordValueMask + } + return DataPointFlags(orig) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/metric_type.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/metric_type.go new file mode 100644 index 000000000..f1ac1e289 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/metric_type.go @@ -0,0 +1,36 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric" + +// MetricType specifies the type of data in a Metric. +type MetricType int32 + +const ( + // MetricTypeEmpty means that metric type is unset. + MetricTypeEmpty MetricType = iota + MetricTypeGauge + MetricTypeSum + MetricTypeHistogram + MetricTypeExponentialHistogram + MetricTypeSummary +) + +// String returns the string representation of the MetricType. +func (mdt MetricType) String() string { + switch mdt { + case MetricTypeEmpty: + return "Empty" + case MetricTypeGauge: + return "Gauge" + case MetricTypeSum: + return "Sum" + case MetricTypeHistogram: + return "Histogram" + case MetricTypeExponentialHistogram: + return "ExponentialHistogram" + case MetricTypeSummary: + return "Summary" + } + return "" +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/metrics.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/metrics.go new file mode 100644 index 000000000..91195ca4d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/metrics.go @@ -0,0 +1,95 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric" + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1" +) + +// Metrics is the top-level struct that is propagated through the metrics pipeline. +// Use NewMetrics to create new instance, zero-initialized instance is not valid for use. 
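+//
+// A minimal construction sketch (illustrative only, not upstream documentation;
+// it assumes the generated pmetric accessors such as SetName and SetDoubleValue):
+//
+//	md := NewMetrics()
+//	sm := md.ResourceMetrics().AppendEmpty().ScopeMetrics().AppendEmpty()
+//	m := sm.Metrics().AppendEmpty()
+//	m.SetName("requests.total")
+//	m.SetEmptyGauge().DataPoints().AppendEmpty().SetDoubleValue(42)
+//	_ = md.MetricCount()    // 1
+//	_ = md.DataPointCount() // 1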
+type Metrics internal.Metrics + +func newMetrics(orig *otlpcollectormetrics.ExportMetricsServiceRequest) Metrics { + state := internal.StateMutable + return Metrics(internal.NewMetrics(orig, &state)) +} + +func (ms Metrics) getOrig() *otlpcollectormetrics.ExportMetricsServiceRequest { + return internal.GetOrigMetrics(internal.Metrics(ms)) +} + +func (ms Metrics) getState() *internal.State { + return internal.GetMetricsState(internal.Metrics(ms)) +} + +// NewMetrics creates a new Metrics struct. +func NewMetrics() Metrics { + return newMetrics(&otlpcollectormetrics.ExportMetricsServiceRequest{}) +} + +// IsReadOnly returns true if this Metrics instance is read-only. +func (ms Metrics) IsReadOnly() bool { + return *ms.getState() == internal.StateReadOnly +} + +// CopyTo copies the Metrics instance overriding the destination. +func (ms Metrics) CopyTo(dest Metrics) { + ms.ResourceMetrics().CopyTo(dest.ResourceMetrics()) +} + +// ResourceMetrics returns the ResourceMetricsSlice associated with this Metrics. +func (ms Metrics) ResourceMetrics() ResourceMetricsSlice { + return newResourceMetricsSlice(&ms.getOrig().ResourceMetrics, internal.GetMetricsState(internal.Metrics(ms))) +} + +// MetricCount calculates the total number of metrics. +func (ms Metrics) MetricCount() int { + metricCount := 0 + rms := ms.ResourceMetrics() + for i := 0; i < rms.Len(); i++ { + rm := rms.At(i) + ilms := rm.ScopeMetrics() + for j := 0; j < ilms.Len(); j++ { + ilm := ilms.At(j) + metricCount += ilm.Metrics().Len() + } + } + return metricCount +} + +// DataPointCount calculates the total number of data points. +func (ms Metrics) DataPointCount() (dataPointCount int) { + rms := ms.ResourceMetrics() + for i := 0; i < rms.Len(); i++ { + rm := rms.At(i) + ilms := rm.ScopeMetrics() + for j := 0; j < ilms.Len(); j++ { + ilm := ilms.At(j) + ms := ilm.Metrics() + for k := 0; k < ms.Len(); k++ { + m := ms.At(k) + switch m.Type() { + case MetricTypeGauge: + dataPointCount += m.Gauge().DataPoints().Len() + case MetricTypeSum: + dataPointCount += m.Sum().DataPoints().Len() + case MetricTypeHistogram: + dataPointCount += m.Histogram().DataPoints().Len() + case MetricTypeExponentialHistogram: + dataPointCount += m.ExponentialHistogram().DataPoints().Len() + case MetricTypeSummary: + dataPointCount += m.Summary().DataPoints().Len() + } + } + } + } + return +} + +// MarkReadOnly marks the Metrics as shared so that no further modifications can be done on it. +func (ms Metrics) MarkReadOnly() { + internal.SetMetricsState(internal.Metrics(ms), internal.StateReadOnly) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/number_data_point_value_type.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/number_data_point_value_type.go new file mode 100644 index 000000000..7a7da34f5 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/number_data_point_value_type.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric" + +// NumberDataPointValueType specifies the type of NumberDataPoint value. +type NumberDataPointValueType int32 + +const ( + // NumberDataPointValueTypeEmpty means that data point value is unset. + NumberDataPointValueTypeEmpty NumberDataPointValueType = iota + NumberDataPointValueTypeInt + NumberDataPointValueTypeDouble +) + +// String returns the string representation of the NumberDataPointValueType. 
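+//
+// Illustrative only: the enum is typically consumed in a switch; this sketch
+// assumes the generated NumberDataPoint accessors ValueType, IntValue and
+// DoubleValue, which are defined elsewhere in this package:
+//
+//	switch dp.ValueType() {
+//	case NumberDataPointValueTypeInt:
+//		_ = dp.IntValue()
+//	case NumberDataPointValueTypeDouble:
+//		_ = dp.DoubleValue()
+//	}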
+func (nt NumberDataPointValueType) String() string { + switch nt { + case NumberDataPointValueTypeEmpty: + return "Empty" + case NumberDataPointValueTypeInt: + return "Int" + case NumberDataPointValueTypeDouble: + return "Double" + } + return "" +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pb.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pb.go new file mode 100644 index 000000000..580f555d7 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pb.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pmetric // import "go.opentelemetry.io/collector/pdata/pmetric" + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpmetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1" +) + +var _ MarshalSizer = (*ProtoMarshaler)(nil) + +type ProtoMarshaler struct{} + +func (e *ProtoMarshaler) MarshalMetrics(md Metrics) ([]byte, error) { + pb := internal.MetricsToProto(internal.Metrics(md)) + return pb.Marshal() +} + +func (e *ProtoMarshaler) MetricsSize(md Metrics) int { + pb := internal.MetricsToProto(internal.Metrics(md)) + return pb.Size() +} + +type ProtoUnmarshaler struct{} + +func (d *ProtoUnmarshaler) UnmarshalMetrics(buf []byte) (Metrics, error) { + pb := otlpmetrics.MetricsData{} + err := pb.Unmarshal(buf) + return Metrics(internal.MetricsFromProto(pb)), err +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/generated_exportpartialsuccess.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/generated_exportpartialsuccess.go new file mode 100644 index 000000000..60aa6a03e --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/generated_exportpartialsuccess.go @@ -0,0 +1,75 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated by "pdata/internal/cmd/pdatagen/main.go". DO NOT EDIT. +// To regenerate this file run "make genpdata". + +package pmetricotlp + +import ( + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1" +) + +// ExportPartialSuccess represents the details of a partially successful export request. +// +// This is a reference type, if passed by value and callee modifies it the +// caller will see the modification. +// +// Must use NewExportPartialSuccess function to create new instances. +// Important: zero-initialized instance is not valid for use. +type ExportPartialSuccess struct { + orig *otlpcollectormetrics.ExportMetricsPartialSuccess + state *internal.State +} + +func newExportPartialSuccess(orig *otlpcollectormetrics.ExportMetricsPartialSuccess, state *internal.State) ExportPartialSuccess { + return ExportPartialSuccess{orig: orig, state: state} +} + +// NewExportPartialSuccess creates a new empty ExportPartialSuccess. +// +// This must be used only in testing code. Users should use "AppendEmpty" when part of a Slice, +// OR directly access the member if this is embedded in another struct. 
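+//
+// A hedged, test-only sketch using the setters defined below:
+//
+//	ps := NewExportPartialSuccess()
+//	ps.SetRejectedDataPoints(3)
+//	ps.SetErrorMessage("3 data points were rejected")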
+func NewExportPartialSuccess() ExportPartialSuccess { + state := internal.StateMutable + return newExportPartialSuccess(&otlpcollectormetrics.ExportMetricsPartialSuccess{}, &state) +} + +// MoveTo moves all properties from the current struct overriding the destination and +// resetting the current instance to its zero value +func (ms ExportPartialSuccess) MoveTo(dest ExportPartialSuccess) { + ms.state.AssertMutable() + dest.state.AssertMutable() + *dest.orig = *ms.orig + *ms.orig = otlpcollectormetrics.ExportMetricsPartialSuccess{} +} + +// RejectedDataPoints returns the rejecteddatapoints associated with this ExportPartialSuccess. +func (ms ExportPartialSuccess) RejectedDataPoints() int64 { + return ms.orig.RejectedDataPoints +} + +// SetRejectedDataPoints replaces the rejecteddatapoints associated with this ExportPartialSuccess. +func (ms ExportPartialSuccess) SetRejectedDataPoints(v int64) { + ms.state.AssertMutable() + ms.orig.RejectedDataPoints = v +} + +// ErrorMessage returns the errormessage associated with this ExportPartialSuccess. +func (ms ExportPartialSuccess) ErrorMessage() string { + return ms.orig.ErrorMessage +} + +// SetErrorMessage replaces the errormessage associated with this ExportPartialSuccess. +func (ms ExportPartialSuccess) SetErrorMessage(v string) { + ms.state.AssertMutable() + ms.orig.ErrorMessage = v +} + +// CopyTo copies all properties from the current struct overriding the destination. +func (ms ExportPartialSuccess) CopyTo(dest ExportPartialSuccess) { + dest.state.AssertMutable() + dest.SetRejectedDataPoints(ms.RejectedDataPoints()) + dest.SetErrorMessage(ms.ErrorMessage()) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/grpc.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/grpc.go new file mode 100644 index 000000000..98d864d73 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/grpc.go @@ -0,0 +1,90 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pmetricotlp // import "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" + +import ( + "context" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1" + "go.opentelemetry.io/collector/pdata/internal/otlp" +) + +// GRPCClient is the client API for OTLP-GRPC Metrics service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type GRPCClient interface { + // Export pmetric.Metrics to the server. + // + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) + + // unexported disallow implementation of the GRPCClient. + unexported() +} + +// NewGRPCClient returns a new GRPCClient connected using the given connection. +func NewGRPCClient(cc *grpc.ClientConn) GRPCClient { + return &grpcClient{rawClient: otlpcollectormetrics.NewMetricsServiceClient(cc)} +} + +type grpcClient struct { + rawClient otlpcollectormetrics.MetricsServiceClient +} + +func (c *grpcClient) Export(ctx context.Context, request ExportRequest, opts ...grpc.CallOption) (ExportResponse, error) { + rsp, err := c.rawClient.Export(ctx, request.orig, opts...) 
+ if err != nil { + return ExportResponse{}, err + } + state := internal.StateMutable + return ExportResponse{orig: rsp, state: &state}, err +} + +func (c *grpcClient) unexported() {} + +// GRPCServer is the server API for OTLP gRPC MetricsService service. +// Implementations MUST embed UnimplementedGRPCServer. +type GRPCServer interface { + // Export is called every time a new request is received. + // + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(context.Context, ExportRequest) (ExportResponse, error) + + // unexported disallow implementation of the GRPCServer. + unexported() +} + +var _ GRPCServer = (*UnimplementedGRPCServer)(nil) + +// UnimplementedGRPCServer MUST be embedded to have forward compatible implementations. +type UnimplementedGRPCServer struct{} + +func (*UnimplementedGRPCServer) Export(context.Context, ExportRequest) (ExportResponse, error) { + return ExportResponse{}, status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func (*UnimplementedGRPCServer) unexported() {} + +// RegisterGRPCServer registers the GRPCServer to the grpc.Server. +func RegisterGRPCServer(s *grpc.Server, srv GRPCServer) { + otlpcollectormetrics.RegisterMetricsServiceServer(s, &rawMetricsServer{srv: srv}) +} + +type rawMetricsServer struct { + srv GRPCServer +} + +func (s rawMetricsServer) Export(ctx context.Context, request *otlpcollectormetrics.ExportMetricsServiceRequest) (*otlpcollectormetrics.ExportMetricsServiceResponse, error) { + otlp.MigrateMetrics(request.ResourceMetrics) + state := internal.StateMutable + rsp, err := s.srv.Export(ctx, ExportRequest{orig: request, state: &state}) + return rsp.orig, err +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/request.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/request.go new file mode 100644 index 000000000..4cca31a60 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/request.go @@ -0,0 +1,74 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pmetricotlp // import "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" + +import ( + "bytes" + + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1" + "go.opentelemetry.io/collector/pdata/internal/json" + "go.opentelemetry.io/collector/pdata/pmetric" +) + +var jsonUnmarshaler = &pmetric.JSONUnmarshaler{} + +// ExportRequest represents the request for gRPC/HTTP client/server. +// It's a wrapper for pmetric.Metrics data. +type ExportRequest struct { + orig *otlpcollectormetrics.ExportMetricsServiceRequest + state *internal.State +} + +// NewExportRequest returns an empty ExportRequest. +func NewExportRequest() ExportRequest { + state := internal.StateMutable + return ExportRequest{ + orig: &otlpcollectormetrics.ExportMetricsServiceRequest{}, + state: &state, + } +} + +// NewExportRequestFromMetrics returns a ExportRequest from pmetric.Metrics. +// Because ExportRequest is a wrapper for pmetric.Metrics, +// any changes to the provided Metrics struct will be reflected in the ExportRequest and vice versa. +func NewExportRequestFromMetrics(md pmetric.Metrics) ExportRequest { + return ExportRequest{ + orig: internal.GetOrigMetrics(internal.Metrics(md)), + state: internal.GetMetricsState(internal.Metrics(md)), + } +} + +// MarshalProto marshals ExportRequest into proto bytes. 
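+//
+// Illustrative round trip using only functions from this package
+// (md is assumed to be an existing pmetric.Metrics value):
+//
+//	req := NewExportRequestFromMetrics(md)
+//	raw, err := req.MarshalProto()
+//	if err != nil {
+//		// handle the error
+//	}
+//	back := NewExportRequest()
+//	err = back.UnmarshalProto(raw)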
+func (ms ExportRequest) MarshalProto() ([]byte, error) { + return ms.orig.Marshal() +} + +// UnmarshalProto unmarshalls ExportRequest from proto bytes. +func (ms ExportRequest) UnmarshalProto(data []byte) error { + return ms.orig.Unmarshal(data) +} + +// MarshalJSON marshals ExportRequest into JSON bytes. +func (ms ExportRequest) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + if err := json.Marshal(&buf, ms.orig); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// UnmarshalJSON unmarshalls ExportRequest from JSON bytes. +func (ms ExportRequest) UnmarshalJSON(data []byte) error { + md, err := jsonUnmarshaler.UnmarshalMetrics(data) + if err != nil { + return err + } + *ms.orig = *internal.GetOrigMetrics(internal.Metrics(md)) + return nil +} + +func (ms ExportRequest) Metrics() pmetric.Metrics { + return pmetric.Metrics(internal.NewMetrics(ms.orig, ms.state)) +} diff --git a/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/response.go b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/response.go new file mode 100644 index 000000000..e928400f3 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp/response.go @@ -0,0 +1,87 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package pmetricotlp // import "go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp" + +import ( + "bytes" + + jsoniter "github.com/json-iterator/go" + + "go.opentelemetry.io/collector/pdata/internal" + otlpcollectormetrics "go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1" + "go.opentelemetry.io/collector/pdata/internal/json" +) + +// ExportResponse represents the response for gRPC/HTTP client/server. +type ExportResponse struct { + orig *otlpcollectormetrics.ExportMetricsServiceResponse + state *internal.State +} + +// NewExportResponse returns an empty ExportResponse. +func NewExportResponse() ExportResponse { + state := internal.StateMutable + return ExportResponse{ + orig: &otlpcollectormetrics.ExportMetricsServiceResponse{}, + state: &state, + } +} + +// MarshalProto marshals ExportResponse into proto bytes. +func (ms ExportResponse) MarshalProto() ([]byte, error) { + return ms.orig.Marshal() +} + +// UnmarshalProto unmarshalls ExportResponse from proto bytes. +func (ms ExportResponse) UnmarshalProto(data []byte) error { + return ms.orig.Unmarshal(data) +} + +// MarshalJSON marshals ExportResponse into JSON bytes. +func (ms ExportResponse) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + if err := json.Marshal(&buf, ms.orig); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// UnmarshalJSON unmarshalls ExportResponse from JSON bytes. +func (ms ExportResponse) UnmarshalJSON(data []byte) error { + iter := jsoniter.ConfigFastest.BorrowIterator(data) + defer jsoniter.ConfigFastest.ReturnIterator(iter) + ms.unmarshalJsoniter(iter) + return iter.Error +} + +// PartialSuccess returns the ExportLogsPartialSuccess associated with this ExportResponse. 
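+//
+// A hedged sketch for callers of Export (resp is assumed to be an
+// ExportResponse returned by a GRPCClient):
+//
+//	ps := resp.PartialSuccess()
+//	if ps.RejectedDataPoints() > 0 {
+//		// some data points were rejected; ps.ErrorMessage() explains why
+//	}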
+func (ms ExportResponse) PartialSuccess() ExportPartialSuccess { + return newExportPartialSuccess(&ms.orig.PartialSuccess, ms.state) +} + +func (ms ExportResponse) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iter *jsoniter.Iterator, f string) bool { + switch f { + case "partial_success", "partialSuccess": + ms.PartialSuccess().unmarshalJsoniter(iter) + default: + iter.Skip() + } + return true + }) +} + +func (ms ExportPartialSuccess) unmarshalJsoniter(iter *jsoniter.Iterator) { + iter.ReadObjectCB(func(iterator *jsoniter.Iterator, f string) bool { + switch f { + case "rejected_data_points", "rejectedDataPoints": + ms.orig.RejectedDataPoints = json.ReadInt64(iter) + case "error_message", "errorMessage": + ms.orig.ErrorMessage = iter.ReadString() + default: + iter.Skip() + } + return true + }) +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/LICENSE b/vendor/go.opentelemetry.io/collector/semconv/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.6.1/generated_resource.go b/vendor/go.opentelemetry.io/collector/semconv/v1.6.1/generated_resource.go new file mode 100644 index 000000000..1a9bc4376 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.6.1/generated_resource.go @@ -0,0 +1,991 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/collector/semconv/v1.6.1" + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // Name of the cloud provider. + // + // Type: Enum + // Required: No + // Stability: stable + AttributeCloudProvider = "cloud.provider" + // The cloud account ID the resource is assigned to. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + AttributeCloudAccountID = "cloud.account.id" + // The geographical region the resource is running. Refer to your provider's docs + // to see the available regions, for example Alibaba Cloud regions, AWS regions, + // Azure regions, or Google Cloud regions. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + AttributeCloudRegion = "cloud.region" + // Cloud regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the resource + // is running. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and + // Google Cloud. + AttributeCloudAvailabilityZone = "cloud.availability_zone" + // The cloud platform in use. + // + // Type: Enum + // Required: No + // Stability: stable + // Note: The prefix of the service SHOULD match the one specified in + // cloud.provider. + AttributeCloudPlatform = "cloud.platform" +) + +const ( + // Alibaba Cloud + AttributeCloudProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeCloudProviderAWS = "aws" + // Microsoft Azure + AttributeCloudProviderAzure = "azure" + // Google Cloud Platform + AttributeCloudProviderGCP = "gcp" +) + +const ( + // Alibaba Cloud Elastic Compute Service + AttributeCloudPlatformAlibabaCloudECS = "alibaba_cloud_ecs" + // Alibaba Cloud Function Compute + AttributeCloudPlatformAlibabaCloudFc = "alibaba_cloud_fc" + // AWS Elastic Compute Cloud + AttributeCloudPlatformAWSEC2 = "aws_ec2" + // AWS Elastic Container Service + AttributeCloudPlatformAWSECS = "aws_ecs" + // AWS Elastic Kubernetes Service + AttributeCloudPlatformAWSEKS = "aws_eks" + // AWS Lambda + AttributeCloudPlatformAWSLambda = "aws_lambda" + // AWS Elastic Beanstalk + AttributeCloudPlatformAWSElasticBeanstalk = "aws_elastic_beanstalk" + // Azure Virtual Machines + AttributeCloudPlatformAzureVM = "azure_vm" + // Azure Container Instances + AttributeCloudPlatformAzureContainerInstances = "azure_container_instances" + // Azure Kubernetes Service + AttributeCloudPlatformAzureAKS = "azure_aks" + // Azure Functions + AttributeCloudPlatformAzureFunctions = "azure_functions" + // Azure App Service + AttributeCloudPlatformAzureAppService = "azure_app_service" + // Google Cloud Compute Engine (GCE) + AttributeCloudPlatformGCPComputeEngine = "gcp_compute_engine" + // Google Cloud Run + AttributeCloudPlatformGCPCloudRun = "gcp_cloud_run" + // Google Cloud Kubernetes Engine (GKE) + AttributeCloudPlatformGCPKubernetesEngine = "gcp_kubernetes_engine" + // Google Cloud Functions (GCF) + AttributeCloudPlatformGCPCloudFunctions = "gcp_cloud_functions" + // Google Cloud App Engine (GAE) + AttributeCloudPlatformGCPAppEngine = "gcp_app_engine" +) + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // The Amazon Resource Name (ARN) of an ECS container instance. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AttributeAWSECSContainerARN = "aws.ecs.container.arn" + // The ARN of an ECS cluster. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSECSClusterARN = "aws.ecs.cluster.arn" + // The launch type for an ECS task. + // + // Type: Enum + // Required: No + // Stability: stable + AttributeAWSECSLaunchtype = "aws.ecs.launchtype" + // The ARN of an ECS task definition. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AttributeAWSECSTaskARN = "aws.ecs.task.arn" + // The task definition family this task definition is a member of. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-family' + AttributeAWSECSTaskFamily = "aws.ecs.task.family" + // The revision for this task definition. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '8', '26' + AttributeAWSECSTaskRevision = "aws.ecs.task.revision" +) + +const ( + // ec2 + AttributeAWSECSLaunchtypeEC2 = "ec2" + // fargate + AttributeAWSECSLaunchtypeFargate = "fargate" +) + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // The ARN of an EKS cluster. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AttributeAWSEKSClusterARN = "aws.eks.cluster.arn" +) + +// Resources specific to Amazon Web Services. +const ( + // The name(s) of the AWS log group(s) an application is writing to. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each write + // to their own log group. + AttributeAWSLogGroupNames = "aws.log.group.names" + // The Amazon Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the log group ARN format documentation. + AttributeAWSLogGroupARNs = "aws.log.group.arns" + // The name(s) of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AttributeAWSLogStreamNames = "aws.log.stream.names" + // The ARN(s) of the AWS log stream(s). + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- + // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the log stream ARN format documentation. One log group can contain + // several log streams, so these ARNs necessarily identify both a log group and a + // log stream. + AttributeAWSLogStreamARNs = "aws.log.stream.arns" +) + +// A container instance. +const ( + // Container name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-autoconf' + AttributeContainerName = "container.name" + // Container ID. Usually a UUID, as for example used to identify Docker + // containers. The UUID might be abbreviated. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'a3bf90e006b2' + AttributeContainerID = "container.id" + // The container runtime managing this container. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + AttributeContainerRuntime = "container.runtime" + // Name of the image the container was built on. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + AttributeContainerImageName = "container.image.name" + // Container image tag. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.1' + AttributeContainerImageTag = "container.image.tag" +) + +// The software deployment. +const ( + // Name of the deployment environment (aka deployment tier). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'staging', 'production' + AttributeDeploymentEnvironment = "deployment.environment" +) + +// The device on which the process represented by this resource is running. +const ( + // A unique identifier representing the device + // + // Type: string + // Required: No + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values outlined + // below. This value is not an advertising identifier and MUST NOT be used as + // such. On iOS (Swift or Objective-C), this value MUST be equal to the vendor + // identifier. On Android (Java or Kotlin), this value MUST be equal to the + // Firebase Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found here on best + // practices and exact implementation details. Caution should be taken when + // storing personal data or anything which can identify a user. GDPR and data + // protection laws may apply, ensure you do your own due diligence. + AttributeDeviceID = "device.id" + // The model identifier for the device + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version of the + // model identifier rather than the market or consumer-friendly name of the + // device. + AttributeDeviceModelIdentifier = "device.model.identifier" + // The marketing name for the device model + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of the + // device model rather than a machine readable alternative. + AttributeDeviceModelName = "device.model.name" +) + +// A serverless instance. +const ( + // The name of the single function that this runtime instance executes. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'my-function' + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback function (which + // may be stored in the code.namespace/code.function span attributes). + AttributeFaaSName = "faas.name" + // The unique ID of the single function that this runtime instance executes. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' + // Note: Depending on the cloud provider, use:
+ //
+ //   - AWS Lambda: The function ARN.
+ //     Take care not to use the "invoked ARN" directly but replace any
+ //     alias suffix with the resolved function version, as the same runtime
+ //     instance may be invokable with multiple different aliases.
+ //   - GCP: The URI of the resource
+ //   - Azure: The Fully Qualified Resource ID.
+ //
    + // On some providers, it may not be possible to determine the full ID at startup, + // which is why this field cannot be made required. For example, on AWS the + // account ID + // part of the ARN is not available without calling another AWS API + // which may be deemed too slow for a short-running lambda function. + // As an alternative, consider setting faas.id as a span attribute instead. + AttributeFaaSID = "faas.id" + // The immutable version of the function being executed. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use:
+ //
+ //   - AWS Lambda: The function version
+ //     (an integer represented as a decimal string).
+ //   - Google Cloud Run: The revision
+ //     (i.e., the function name plus the revision suffix).
+ //   - Google Cloud Functions: The value of the
+ //     K_REVISION environment variable.
+ //   - Azure Functions: Not applicable. Do not set this attribute.
+ //
    + AttributeFaaSVersion = "faas.version" + // The execution environment ID as a string, that will be potentially reused for + // other invocations to the same function/function version. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note:
+ //
+ //   - AWS Lambda: Use the (full) log stream name.
+ //
    + AttributeFaaSInstance = "faas.instance" + // The amount of memory available to the serverless function in MiB. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 128 + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable AWS_LAMBDA_FUNCTION_MEMORY_SIZE provides this + // information. + AttributeFaaSMaxMemory = "faas.max_memory" +) + +// A host is defined as a general computing instance. +const ( + // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud + // provider. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-test' + AttributeHostID = "host.id" + // Name of the host. On Unix systems, it may contain what the hostname command + // returns, or the fully qualified hostname, or another name specified by the + // user. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-test' + AttributeHostName = "host.name" + // Type of host. For Cloud, this must be the machine type. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'n1-standard-1' + AttributeHostType = "host.type" + // The CPU architecture the host system is running on. + // + // Type: Enum + // Required: No + // Stability: stable + AttributeHostArch = "host.arch" + // Name of the VM image or OS install the host was instantiated from. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + AttributeHostImageName = "host.image.name" + // VM image ID. For Cloud, this value is from the provider. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + AttributeHostImageID = "host.image.id" + // The version string of the VM image as defined in Version Attributes. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.1' + AttributeHostImageVersion = "host.image.version" +) + +const ( + // AMD64 + AttributeHostArchAMD64 = "amd64" + // ARM32 + AttributeHostArchARM32 = "arm32" + // ARM64 + AttributeHostArchARM64 = "arm64" + // Itanium + AttributeHostArchIA64 = "ia64" + // 32-bit PowerPC + AttributeHostArchPPC32 = "ppc32" + // 64-bit PowerPC + AttributeHostArchPPC64 = "ppc64" + // 32-bit x86 + AttributeHostArchX86 = "x86" +) + +// A Kubernetes Cluster. +const ( + // The name of the cluster. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-cluster' + AttributeK8SClusterName = "k8s.cluster.name" +) + +// A Kubernetes Node object. +const ( + // The name of the Node. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'node-1' + AttributeK8SNodeName = "k8s.node.name" + // The UID of the Node. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + AttributeK8SNodeUID = "k8s.node.uid" +) + +// A Kubernetes Namespace. +const ( + // The name of the namespace that the pod is running in. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'default' + AttributeK8SNamespaceName = "k8s.namespace.name" +) + +// A Kubernetes Pod object. +const ( + // The UID of the Pod. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SPodUID = "k8s.pod.uid" + // The name of the Pod. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + AttributeK8SPodName = "k8s.pod.name" +) + +// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // The name of the Container in a Pod template. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'redis' + AttributeK8SContainerName = "k8s.container.name" +) + +// A Kubernetes ReplicaSet object. +const ( + // The UID of the ReplicaSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SReplicaSetUID = "k8s.replicaset.uid" + // The name of the ReplicaSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SReplicaSetName = "k8s.replicaset.name" +) + +// A Kubernetes Deployment object. +const ( + // The UID of the Deployment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDeploymentUID = "k8s.deployment.uid" + // The name of the Deployment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SDeploymentName = "k8s.deployment.name" +) + +// A Kubernetes StatefulSet object. +const ( + // The UID of the StatefulSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SStatefulSetUID = "k8s.statefulset.uid" + // The name of the StatefulSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SStatefulSetName = "k8s.statefulset.name" +) + +// A Kubernetes DaemonSet object. +const ( + // The UID of the DaemonSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SDaemonSetUID = "k8s.daemonset.uid" + // The name of the DaemonSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SDaemonSetName = "k8s.daemonset.name" +) + +// A Kubernetes Job object. +const ( + // The UID of the Job. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SJobUID = "k8s.job.uid" + // The name of the Job. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SJobName = "k8s.job.name" +) + +// A Kubernetes CronJob object. +const ( + // The UID of the CronJob. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + AttributeK8SCronJobUID = "k8s.cronjob.uid" + // The name of the CronJob. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + AttributeK8SCronJobName = "k8s.cronjob.name" +) + +// The operating system (OS) on which the process represented by this resource is running. +const ( + // The operating system type. + // + // Type: Enum + // Required: Always + // Stability: stable + AttributeOSType = "os.type" + // Human readable (not intended to be parsed) OS version information, like e.g. + // reported by ver or lsb_release -a commands. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' + AttributeOSDescription = "os.description" + // Human readable operating system name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + AttributeOSName = "os.name" + // The version string of the operating system as defined in Version Attributes. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '14.2.1', '18.04.1' + AttributeOSVersion = "os.version" +) + +const ( + // Microsoft Windows + AttributeOSTypeWindows = "windows" + // Linux + AttributeOSTypeLinux = "linux" + // Apple Darwin + AttributeOSTypeDarwin = "darwin" + // FreeBSD + AttributeOSTypeFreeBSD = "freebsd" + // NetBSD + AttributeOSTypeNetBSD = "netbsd" + // OpenBSD + AttributeOSTypeOpenBSD = "openbsd" + // DragonFly BSD + AttributeOSTypeDragonflyBSD = "dragonflybsd" + // HP-UX (Hewlett Packard Unix) + AttributeOSTypeHPUX = "hpux" + // AIX (Advanced Interactive eXecutive) + AttributeOSTypeAIX = "aix" + // Oracle Solaris + AttributeOSTypeSolaris = "solaris" + // IBM z/OS + AttributeOSTypeZOS = "z_os" +) + +// An operating system process. +const ( + // Process identifier (PID). + // + // Type: int + // Required: No + // Stability: stable + // Examples: 1234 + AttributeProcessPID = "process.pid" + // The name of the process executable. On Linux based systems, can be set to the + // Name in proc/[pid]/status. On Windows, can be set to the base name of + // GetProcessImageFileNameW. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'otelcol' + AttributeProcessExecutableName = "process.executable.name" + // The full path to the process executable. On Linux based systems, can be set to + // the target of proc/[pid]/exe. On Windows, can be set to the result of + // GetProcessImageFileNameW. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + AttributeProcessExecutablePath = "process.executable.path" + // The command used to launch the process (i.e. the command name). On Linux based + // systems, can be set to the zeroth string in proc/[pid]/cmdline. On Windows, can + // be set to the first parameter extracted from GetCommandLineW. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'cmd/otelcol' + AttributeProcessCommand = "process.command" + // The full command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of GetCommandLineW. Do not + // set this if you have to assemble it just for monitoring; use + // process.command_args instead. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + AttributeProcessCommandLine = "process.command_line" + // All the command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited strings + // extracted from proc/[pid]/cmdline. For libc-based executables, this would be + // the full argv vector passed to main. 
+ // + // Type: string[] + // Required: See below + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + AttributeProcessCommandArgs = "process.command_args" + // The username of the user that owns the process. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'root' + AttributeProcessOwner = "process.owner" +) + +// The single (language) runtime instance which is monitored. +const ( + // The name of the runtime of this process. For compiled native binaries, this + // SHOULD be the name of the compiler. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + AttributeProcessRuntimeName = "process.runtime.name" + // The version of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '14.0.2' + AttributeProcessRuntimeVersion = "process.runtime.version" + // An additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + AttributeProcessRuntimeDescription = "process.runtime.description" +) + +// A service instance. +const ( + // Logical name of the service. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to unknown_service: + // concatenated with process.executable.name, e.g. unknown_service:bash. If + // process.executable.name is not available, the value MUST be set to + // unknown_service. + AttributeServiceName = "service.name" + // A namespace for service.name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. service.name + // is expected to be unique within the same namespace. If service.namespace is not + // specified in the Resource then service.name is expected to be unique for all + // services that have no explicit namespace defined (so the empty/unspecified + // namespace is simply one more valid namespace). Zero-length namespace string is + // assumed equal to unspecified namespace. + AttributeServiceNamespace = "service.namespace" + // The string ID of the service instance. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // service.namespace,service.name pair (in other words + // service.namespace,service.name,service.instance.id triplet MUST be globally + // unique). The ID helps to distinguish instances of the same service that exist + // at the same time (e.g. instances of a horizontally scaled service). It is + // preferable for the ID to be persistent and stay the same for the lifetime of + // the service instance, however it is acceptable that the ID is ephemeral and + // changes during important lifetime events for the service (e.g. service + // restarts). 
If the service has no inherent unique ID that can be used as the + // value of this attribute it is recommended to generate a random Version 1 or + // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + AttributeServiceInstanceID = "service.instance.id" + // The version string of the service API or implementation. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '2.0.0' + AttributeServiceVersion = "service.version" +) + +// The telemetry SDK used to capture data recorded by the instrumentation libraries. +const ( + // The name of the telemetry SDK as defined above. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + AttributeTelemetrySDKName = "telemetry.sdk.name" + // The language of the telemetry SDK. + // + // Type: Enum + // Required: No + // Stability: stable + AttributeTelemetrySDKLanguage = "telemetry.sdk.language" + // The version string of the telemetry SDK. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1.2.3' + AttributeTelemetrySDKVersion = "telemetry.sdk.version" + // The version string of the auto instrumentation agent, if used. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1.2.3' + AttributeTelemetryAutoVersion = "telemetry.auto.version" +) + +const ( + // cpp + AttributeTelemetrySDKLanguageCPP = "cpp" + // dotnet + AttributeTelemetrySDKLanguageDotnet = "dotnet" + // erlang + AttributeTelemetrySDKLanguageErlang = "erlang" + // go + AttributeTelemetrySDKLanguageGo = "go" + // java + AttributeTelemetrySDKLanguageJava = "java" + // nodejs + AttributeTelemetrySDKLanguageNodejs = "nodejs" + // php + AttributeTelemetrySDKLanguagePHP = "php" + // python + AttributeTelemetrySDKLanguagePython = "python" + // ruby + AttributeTelemetrySDKLanguageRuby = "ruby" + // webjs + AttributeTelemetrySDKLanguageWebjs = "webjs" +) + +// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. +const ( + // The name of the web engine. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'WildFly' + AttributeWebEngineName = "webengine.name" + // The version of the web engine. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '21.0.0' + AttributeWebEngineVersion = "webengine.version" + // Additional description of the web engine (e.g. detailed version and edition + // information). 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' + AttributeWebEngineDescription = "webengine.description" +) + +func GetResourceSemanticConventionAttributeNames() []string { + return []string{ + AttributeCloudProvider, + AttributeCloudAccountID, + AttributeCloudRegion, + AttributeCloudAvailabilityZone, + AttributeCloudPlatform, + AttributeAWSECSContainerARN, + AttributeAWSECSClusterARN, + AttributeAWSECSLaunchtype, + AttributeAWSECSTaskARN, + AttributeAWSECSTaskFamily, + AttributeAWSECSTaskRevision, + AttributeAWSEKSClusterARN, + AttributeAWSLogGroupNames, + AttributeAWSLogGroupARNs, + AttributeAWSLogStreamNames, + AttributeAWSLogStreamARNs, + AttributeContainerName, + AttributeContainerID, + AttributeContainerRuntime, + AttributeContainerImageName, + AttributeContainerImageTag, + AttributeDeploymentEnvironment, + AttributeDeviceID, + AttributeDeviceModelIdentifier, + AttributeDeviceModelName, + AttributeFaaSName, + AttributeFaaSID, + AttributeFaaSVersion, + AttributeFaaSInstance, + AttributeFaaSMaxMemory, + AttributeHostID, + AttributeHostName, + AttributeHostType, + AttributeHostArch, + AttributeHostImageName, + AttributeHostImageID, + AttributeHostImageVersion, + AttributeK8SClusterName, + AttributeK8SNodeName, + AttributeK8SNodeUID, + AttributeK8SNamespaceName, + AttributeK8SPodUID, + AttributeK8SPodName, + AttributeK8SContainerName, + AttributeK8SReplicaSetUID, + AttributeK8SReplicaSetName, + AttributeK8SDeploymentUID, + AttributeK8SDeploymentName, + AttributeK8SStatefulSetUID, + AttributeK8SStatefulSetName, + AttributeK8SDaemonSetUID, + AttributeK8SDaemonSetName, + AttributeK8SJobUID, + AttributeK8SJobName, + AttributeK8SCronJobUID, + AttributeK8SCronJobName, + AttributeOSType, + AttributeOSDescription, + AttributeOSName, + AttributeOSVersion, + AttributeProcessPID, + AttributeProcessExecutableName, + AttributeProcessExecutablePath, + AttributeProcessCommand, + AttributeProcessCommandLine, + AttributeProcessCommandArgs, + AttributeProcessOwner, + AttributeProcessRuntimeName, + AttributeProcessRuntimeVersion, + AttributeProcessRuntimeDescription, + AttributeServiceName, + AttributeServiceNamespace, + AttributeServiceInstanceID, + AttributeServiceVersion, + AttributeTelemetrySDKName, + AttributeTelemetrySDKLanguage, + AttributeTelemetrySDKVersion, + AttributeTelemetryAutoVersion, + AttributeWebEngineName, + AttributeWebEngineVersion, + AttributeWebEngineDescription, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.6.1/generated_trace.go b/vendor/go.opentelemetry.io/collector/semconv/v1.6.1/generated_trace.go new file mode 100644 index 000000000..741801984 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.6.1/generated_trace.go @@ -0,0 +1,1587 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/collector/semconv/v1.6.1" + +// Span attributes used by AWS Lambda (in addition to general `faas` attributes). +const ( + // The full invoked ARN as provided on the Context passed to the function (Lambda- + // Runtime-Invoked-Function-ARN header on the /runtime/invocation/next + // applicable). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from faas.id if an alias is involved. 
+ AttributeAWSLambdaInvokedARN = "aws.lambda.invoked_arn" +) + +// This document defines the attributes used to perform database client calls. +const ( + // An identifier for the database management system (DBMS) product being used. See + // below for a list of well-known identifiers. + // + // Type: Enum + // Required: Always + // Stability: stable + AttributeDBSystem = "db.system" + // The connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + AttributeDBConnectionString = "db.connection_string" + // Username for accessing the database. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + AttributeDBUser = "db.user" + // The fully-qualified class name of the Java Database Connectivity (JDBC) driver + // used to connect. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + AttributeDBJDBCDriverClassname = "db.jdbc.driver_classname" + // If no tech-specific attribute is defined, this attribute is used to report the + // name of the database being accessed. For commands that switch the database, + // this should be set to the target database (even if the command fails). + // + // Type: string + // Required: Required, if applicable and no more-specific attribute is defined. + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called + // "schema name". + AttributeDBName = "db.name" + // The database statement being executed. + // + // Type: string + // Required: Required if applicable and not explicitly disabled via + // instrumentation configuration. + // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + // Note: The value may be sanitized to exclude sensitive information. + AttributeDBStatement = "db.statement" + // The name of the operation being executed, e.g. the MongoDB command name such as + // findAndModify, or the SQL keyword. + // + // Type: string + // Required: Required, if `db.statement` is not applicable. + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to attempt any + // client-side parsing of db.statement just to get this property, but it should be + // set if the operation name is provided by the library being instrumented. If the + // SQL statement has an ambiguous operation, or performs more than one operation, + // this value may be omitted. + AttributeDBOperation = "db.operation" +) + +const ( + // Some other SQL database. Fallback only. 
See notes + AttributeDBSystemOtherSQL = "other_sql" + // Microsoft SQL Server + AttributeDBSystemMSSQL = "mssql" + // MySQL + AttributeDBSystemMySQL = "mysql" + // Oracle Database + AttributeDBSystemOracle = "oracle" + // IBM DB2 + AttributeDBSystemDB2 = "db2" + // PostgreSQL + AttributeDBSystemPostgreSQL = "postgresql" + // Amazon Redshift + AttributeDBSystemRedshift = "redshift" + // Apache Hive + AttributeDBSystemHive = "hive" + // Cloudscape + AttributeDBSystemCloudscape = "cloudscape" + // HyperSQL DataBase + AttributeDBSystemHSQLDB = "hsqldb" + // Progress Database + AttributeDBSystemProgress = "progress" + // SAP MaxDB + AttributeDBSystemMaxDB = "maxdb" + // SAP HANA + AttributeDBSystemHanaDB = "hanadb" + // Ingres + AttributeDBSystemIngres = "ingres" + // FirstSQL + AttributeDBSystemFirstSQL = "firstsql" + // EnterpriseDB + AttributeDBSystemEDB = "edb" + // InterSystems Caché + AttributeDBSystemCache = "cache" + // Adabas (Adaptable Database System) + AttributeDBSystemAdabas = "adabas" + // Firebird + AttributeDBSystemFirebird = "firebird" + // Apache Derby + AttributeDBSystemDerby = "derby" + // FileMaker + AttributeDBSystemFilemaker = "filemaker" + // Informix + AttributeDBSystemInformix = "informix" + // InstantDB + AttributeDBSystemInstantDB = "instantdb" + // InterBase + AttributeDBSystemInterbase = "interbase" + // MariaDB + AttributeDBSystemMariaDB = "mariadb" + // Netezza + AttributeDBSystemNetezza = "netezza" + // Pervasive PSQL + AttributeDBSystemPervasive = "pervasive" + // PointBase + AttributeDBSystemPointbase = "pointbase" + // SQLite + AttributeDBSystemSqlite = "sqlite" + // Sybase + AttributeDBSystemSybase = "sybase" + // Teradata + AttributeDBSystemTeradata = "teradata" + // Vertica + AttributeDBSystemVertica = "vertica" + // H2 + AttributeDBSystemH2 = "h2" + // ColdFusion IMQ + AttributeDBSystemColdfusion = "coldfusion" + // Apache Cassandra + AttributeDBSystemCassandra = "cassandra" + // Apache HBase + AttributeDBSystemHBase = "hbase" + // MongoDB + AttributeDBSystemMongoDB = "mongodb" + // Redis + AttributeDBSystemRedis = "redis" + // Couchbase + AttributeDBSystemCouchbase = "couchbase" + // CouchDB + AttributeDBSystemCouchDB = "couchdb" + // Microsoft Azure Cosmos DB + AttributeDBSystemCosmosDB = "cosmosdb" + // Amazon DynamoDB + AttributeDBSystemDynamoDB = "dynamodb" + // Neo4j + AttributeDBSystemNeo4j = "neo4j" + // Apache Geode + AttributeDBSystemGeode = "geode" + // Elasticsearch + AttributeDBSystemElasticsearch = "elasticsearch" + // Memcached + AttributeDBSystemMemcached = "memcached" + // CockroachDB + AttributeDBSystemCockroachdb = "cockroachdb" +) + +// Connection-level attributes for Microsoft SQL Server +const ( + // The Microsoft SQL Server instance name connecting to. This name is used to + // determine the port of a named instance. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a db.mssql.instance_name, net.peer.port is no longer required + // (but still recommended if non-standard). + AttributeDBMSSQLInstanceName = "db.mssql.instance_name" +) + +// Call-level attributes for Cassandra +const ( + // The name of the keyspace being accessed. To be used instead of the generic + // db.name attribute. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'mykeyspace' + AttributeDBCassandraKeyspace = "db.cassandra.keyspace" + // The fetch size used for paging, i.e. how many rows will be returned at once. 
+ // + // Type: int + // Required: No + // Stability: stable + // Examples: 5000 + AttributeDBCassandraPageSize = "db.cassandra.page_size" + // The consistency level of the query. Based on consistency values from CQL. + // + // Type: Enum + // Required: No + // Stability: stable + AttributeDBCassandraConsistencyLevel = "db.cassandra.consistency_level" + // The name of the primary table that the operation is acting upon, including the + // schema name (if applicable). + // + // Type: string + // Required: Recommended if available. + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra rather + // than sql. It is not recommended to attempt any client-side parsing of + // db.statement just to get this property, but it should be set if it is provided + // by the library being instrumented. If the operation is acting upon an anonymous + // table, or more than one table, this value MUST NOT be set. + AttributeDBCassandraTable = "db.cassandra.table" + // Whether or not the query is idempotent. + // + // Type: boolean + // Required: No + // Stability: stable + AttributeDBCassandraIdempotence = "db.cassandra.idempotence" + // The number of times a query was speculatively executed. Not set or 0 if the + // query was not executed speculatively. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 0, 2 + AttributeDBCassandraSpeculativeExecutionCount = "db.cassandra.speculative_execution_count" + // The ID of the coordinating node for a query. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + AttributeDBCassandraCoordinatorID = "db.cassandra.coordinator.id" + // The data center of the coordinating node for a query. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-west-2' + AttributeDBCassandraCoordinatorDC = "db.cassandra.coordinator.dc" +) + +const ( + // all + AttributeDBCassandraConsistencyLevelAll = "all" + // each_quorum + AttributeDBCassandraConsistencyLevelEachQuorum = "each_quorum" + // quorum + AttributeDBCassandraConsistencyLevelQuorum = "quorum" + // local_quorum + AttributeDBCassandraConsistencyLevelLocalQuorum = "local_quorum" + // one + AttributeDBCassandraConsistencyLevelOne = "one" + // two + AttributeDBCassandraConsistencyLevelTwo = "two" + // three + AttributeDBCassandraConsistencyLevelThree = "three" + // local_one + AttributeDBCassandraConsistencyLevelLocalOne = "local_one" + // any + AttributeDBCassandraConsistencyLevelAny = "any" + // serial + AttributeDBCassandraConsistencyLevelSerial = "serial" + // local_serial + AttributeDBCassandraConsistencyLevelLocalSerial = "local_serial" +) + +// Call-level attributes for Apache HBase +const ( + // The HBase namespace being accessed. To be used instead of the generic db.name + // attribute. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'default' + AttributeDBHBaseNamespace = "db.hbase.namespace" +) + +// Call-level attributes for Redis +const ( + // The index of the database being accessed as used in the SELECT command, + // provided as an integer. To be used instead of the generic db.name attribute. + // + // Type: int + // Required: Required, if other than the default database (`0`). + // Stability: stable + // Examples: 0, 1, 15 + AttributeDBRedisDBIndex = "db.redis.database_index" +) + +// Call-level attributes for MongoDB +const ( + // The collection being accessed within the database stated in db.name. 
+ // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'customers', 'products' + AttributeDBMongoDBCollection = "db.mongodb.collection" +) + +// Call-level attributes for SQL databases +const ( + // The name of the primary table that the operation is acting upon, including the + // schema name (if applicable). + // + // Type: string + // Required: Recommended if available. + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of db.statement + // just to get this property, but it should be set if it is provided by the + // library being instrumented. If the operation is acting upon an anonymous table, + // or more than one table, this value MUST NOT be set. + AttributeDBSQLTable = "db.sql.table" +) + +// This document defines the attributes used to report a single exception associated with a span. +const ( + // The type of the exception (its fully-qualified class name, if applicable). The + // dynamic type of the exception should be preferred over the static type in + // languages that support it. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + AttributeExceptionType = "exception.type" + // The exception message. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" + AttributeExceptionMessage = "exception.message" + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + AttributeExceptionStacktrace = "exception.stacktrace" + // SHOULD be set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // Required: No + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of a span, + // if that span is ended while the exception is still logically "in + // flight". + // This may be actually "in flight" in some languages (e.g. if the + // exception + // is passed to a Context manager's __exit__ method in Python) but will + // usually be caught at the point of recording the exception in most languages. It + // is usually not possible to determine at the point where an exception is thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending the span, + // as done in the example above. It follows that an exception may still escape the + // scope of the span + // even if the exception.escaped attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + AttributeExceptionEscaped = "exception.escaped" +) + +// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans.
+const ( + // Type of the trigger on which the function is executed. + // + // Type: Enum + // Required: On FaaS instances, faas.trigger MUST be set on incoming invocations. + // Clients invoking FaaS instances MUST set `faas.trigger` on outgoing + // invocations, if it is known to the client. This is, for example, not the case, + // when the transport layer is abstracted in a FaaS client framework without + // access to its configuration. + // Stability: stable + AttributeFaaSTrigger = "faas.trigger" + // The execution ID of the current function execution. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + AttributeFaaSExecution = "faas.execution" +) + +const ( + // A response to some data source operation such as a database or filesystem read/write + AttributeFaaSTriggerDatasource = "datasource" + // To provide an answer to an inbound HTTP request + AttributeFaaSTriggerHTTP = "http" + // A function is set to be executed when messages are sent to a messaging system + AttributeFaaSTriggerPubsub = "pubsub" + // A function is scheduled to be executed regularly + AttributeFaaSTriggerTimer = "timer" + // If none of the others apply + AttributeFaaSTriggerOther = "other" +) + +// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. +const ( + // The name of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos + // DB to the database name. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'myBucketName', 'myDBName' + AttributeFaaSDocumentCollection = "faas.document.collection" + // Describes the type of the operation that was performed on the data. + // + // Type: Enum + // Required: Always + // Stability: stable + AttributeFaaSDocumentOperation = "faas.document.operation" + // A string containing the time when the data was accessed in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSDocumentTime = "faas.document.time" + // The document name/table subjected to the operation. For example, in Cloud + // Storage or S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + AttributeFaaSDocumentName = "faas.document.name" +) + +const ( + // When a new object is created + AttributeFaaSDocumentOperationInsert = "insert" + // When an object is modified + AttributeFaaSDocumentOperationEdit = "edit" + // When an object is deleted + AttributeFaaSDocumentOperationDelete = "delete" +) + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // A string containing the function invocation time in the ISO 8601 format + // expressed in UTC. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + AttributeFaaSTime = "faas.time" + // A string containing the schedule period as Cron Expression. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0/5 * * * ? *' + AttributeFaaSCron = "faas.cron" +) + +// Contains additional attributes for incoming FaaS spans. +const ( + // A boolean that is true if the serverless function is executed for the first + // time (aka cold-start). 
+ // + // Type: boolean + // Required: No + // Stability: stable + AttributeFaaSColdstart = "faas.coldstart" +) + +// Contains additional attributes for outgoing FaaS spans. +const ( + // The name of the invoked function. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the faas.name resource attribute of the invoked + // function. + AttributeFaaSInvokedName = "faas.invoked_name" + // The cloud provider of the invoked function. + // + // Type: Enum + // Required: Always + // Stability: stable + // Note: SHOULD be equal to the cloud.provider resource attribute of the invoked + // function. + AttributeFaaSInvokedProvider = "faas.invoked_provider" + // The cloud region of the invoked function. + // + // Type: string + // Required: For some cloud providers, like AWS or GCP, the region in which a + // function is hosted is essential to uniquely identify the function and also part + // of its endpoint. Since it's part of the endpoint being called, the region is + // always known to clients. In these cases, `faas.invoked_region` MUST be set + // accordingly. If the region is unknown to the client or not required for + // identifying the invoked function, setting `faas.invoked_region` is optional. + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the cloud.region resource attribute of the invoked + // function. + AttributeFaaSInvokedRegion = "faas.invoked_region" +) + +const ( + // Alibaba Cloud + AttributeFaaSInvokedProviderAlibabaCloud = "alibaba_cloud" + // Amazon Web Services + AttributeFaaSInvokedProviderAWS = "aws" + // Microsoft Azure + AttributeFaaSInvokedProviderAzure = "azure" + // Google Cloud Platform + AttributeFaaSInvokedProviderGCP = "gcp" +) + +// These attributes may be used for any network related operation. +const ( + // Transport protocol used. See note below. + // + // Type: Enum + // Required: No + // Stability: stable + AttributeNetTransport = "net.transport" + // Remote address of the peer (dotted decimal for IPv4 or RFC5952 for IPv6) + // + // Type: string + // Required: No + // Stability: stable + // Examples: '127.0.0.1' + AttributeNetPeerIP = "net.peer.ip" + // Remote port number. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 80, 8080, 443 + AttributeNetPeerPort = "net.peer.port" + // Remote hostname or similar, see note below. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'example.com' + AttributeNetPeerName = "net.peer.name" + // Like net.peer.ip but for the host IP. Useful in case of a multi-IP host. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '192.168.0.1' + AttributeNetHostIP = "net.host.ip" + // Like net.peer.port but for the host port. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 35555 + AttributeNetHostPort = "net.host.port" + // Local hostname or similar, see note below. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'localhost' + AttributeNetHostName = "net.host.name" + // The internet connection type currently being used by the host. + // + // Type: Enum + // Required: No + // Stability: stable + // Examples: 'wifi' + AttributeNetHostConnectionType = "net.host.connection.type" + // This describes more details regarding the connection.type. It may be the type + // of cell technology connection, but it could be used for describing details + // about a wifi connection. 
+ // + // Type: Enum + // Required: No + // Stability: stable + // Examples: 'LTE' + AttributeNetHostConnectionSubtype = "net.host.connection.subtype" + // The name of the mobile carrier. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'sprint' + AttributeNetHostCarrierName = "net.host.carrier.name" + // The mobile carrier country code. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '310' + AttributeNetHostCarrierMcc = "net.host.carrier.mcc" + // The mobile carrier network code. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '001' + AttributeNetHostCarrierMnc = "net.host.carrier.mnc" + // The ISO 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'DE' + AttributeNetHostCarrierIcc = "net.host.carrier.icc" +) + +const ( + // ip_tcp + AttributeNetTransportTCP = "ip_tcp" + // ip_udp + AttributeNetTransportUDP = "ip_udp" + // Another IP-based protocol + AttributeNetTransportIP = "ip" + // Unix Domain socket. See below + AttributeNetTransportUnix = "unix" + // Named or anonymous pipe. See note below + AttributeNetTransportPipe = "pipe" + // In-process communication + AttributeNetTransportInProc = "inproc" + // Something else (non IP-based) + AttributeNetTransportOther = "other" +) + +const ( + // wifi + AttributeNetHostConnectionTypeWifi = "wifi" + // wired + AttributeNetHostConnectionTypeWired = "wired" + // cell + AttributeNetHostConnectionTypeCell = "cell" + // unavailable + AttributeNetHostConnectionTypeUnavailable = "unavailable" + // unknown + AttributeNetHostConnectionTypeUnknown = "unknown" +) + +const ( + // GPRS + AttributeNetHostConnectionSubtypeGprs = "gprs" + // EDGE + AttributeNetHostConnectionSubtypeEdge = "edge" + // UMTS + AttributeNetHostConnectionSubtypeUmts = "umts" + // CDMA + AttributeNetHostConnectionSubtypeCdma = "cdma" + // EVDO Rel. 0 + AttributeNetHostConnectionSubtypeEvdo0 = "evdo_0" + // EVDO Rev. A + AttributeNetHostConnectionSubtypeEvdoA = "evdo_a" + // CDMA2000 1XRTT + AttributeNetHostConnectionSubtypeCdma20001xrtt = "cdma2000_1xrtt" + // HSDPA + AttributeNetHostConnectionSubtypeHsdpa = "hsdpa" + // HSUPA + AttributeNetHostConnectionSubtypeHsupa = "hsupa" + // HSPA + AttributeNetHostConnectionSubtypeHspa = "hspa" + // IDEN + AttributeNetHostConnectionSubtypeIden = "iden" + // EVDO Rev. B + AttributeNetHostConnectionSubtypeEvdoB = "evdo_b" + // LTE + AttributeNetHostConnectionSubtypeLte = "lte" + // EHRPD + AttributeNetHostConnectionSubtypeEhrpd = "ehrpd" + // HSPAP + AttributeNetHostConnectionSubtypeHspap = "hspap" + // GSM + AttributeNetHostConnectionSubtypeGsm = "gsm" + // TD-SCDMA + AttributeNetHostConnectionSubtypeTdScdma = "td_scdma" + // IWLAN + AttributeNetHostConnectionSubtypeIwlan = "iwlan" + // 5G NR (New Radio) + AttributeNetHostConnectionSubtypeNr = "nr" + // 5G NRNSA (New Radio Non-Standalone) + AttributeNetHostConnectionSubtypeNrnsa = "nrnsa" + // LTE CA + AttributeNetHostConnectionSubtypeLteCa = "lte_ca" +) + +// Operations that access some remote service. +const ( + // The service.name of the remote service. SHOULD be equal to the actual + // service.name resource attribute of the remote service if any. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'AuthTokenCache' + AttributePeerService = "peer.service" +) + +// These attributes may be used for any operation with an authenticated and/or authorized enduser. 
+const ( + // Username or client_id extracted from the access token or Authorization header + // in the inbound request from outside the system. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'username' + AttributeEnduserID = "enduser.id" + // Actual/assumed role the client is making the request under extracted from token + // or application security context. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'admin' + AttributeEnduserRole = "enduser.role" + // Scopes or granted authorities the client currently possesses extracted from + // token or application security context. The value would come from the scope + // associated with an OAuth 2.0 Access Token or an attribute value in a SAML 2.0 + // Assertion. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'read:message, write:files' + AttributeEnduserScope = "enduser.scope" +) + +// These attributes may be used for any operation to store information about a thread that started a span. +const ( + // Current "managed" thread ID (as opposed to OS thread ID). + // + // Type: int + // Required: No + // Stability: stable + // Examples: 42 + AttributeThreadID = "thread.id" + // Current thread name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'main' + AttributeThreadName = "thread.name" +) + +// These attributes allow to report this unit of code and therefore to provide more context about the span. +const ( + // The method or function name, or equivalent (usually rightmost part of the code + // unit's name). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'serveRequest' + AttributeCodeFunction = "code.function" + // The "namespace" within which code.function is defined. Usually the + // qualified class or module name, such that code.namespace + some separator + + // code.function form a unique identifier for the code unit. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'com.example.MyHTTPService' + AttributeCodeNamespace = "code.namespace" + // The source code file name that identifies the code unit as uniquely as possible + // (preferably an absolute file path). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + AttributeCodeFilepath = "code.filepath" + // The line number in code.filepath best representing the operation. It SHOULD + // point within the code unit named in code.function. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 42 + AttributeCodeLineNumber = "code.lineno" +) + +// This document defines semantic conventions for HTTP client and server Spans. +const ( + // HTTP request method. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + AttributeHTTPMethod = "http.method" + // Full HTTP request URL in the form scheme://host[:port]/path?query[#fragment]. + // Usually the fragment is not transmitted over HTTP, but if it is known, it + // should be included nevertheless. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: http.url MUST NOT contain credentials passed via URL in form of + // https://username:password@www.example.com/. In such case the attribute's value + // should be https://www.example.com/. 
+ AttributeHTTPURL = "http.url" + // The full request target as passed in a HTTP request line or equivalent. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '/path/12314/?q=ddds#123' + AttributeHTTPTarget = "http.target" + // The value of the HTTP host header. When the header is empty or not present, + // this attribute should be the same. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'www.example.org' + AttributeHTTPHost = "http.host" + // The URI scheme identifying the used protocol. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'http', 'https' + AttributeHTTPScheme = "http.scheme" + // HTTP response status code. + // + // Type: int + // Required: If and only if one was received/sent. + // Stability: stable + // Examples: 200 + AttributeHTTPStatusCode = "http.status_code" + // Kind of HTTP protocol used. + // + // Type: Enum + // Required: No + // Stability: stable + // Note: If net.transport is not specified, it can be assumed to be IP.TCP except + // if http.flavor is QUIC, in which case IP.UDP is assumed. + AttributeHTTPFlavor = "http.flavor" + // Value of the HTTP User-Agent header sent by the client. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + AttributeHTTPUserAgent = "http.user_agent" + // The size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // Content-Length header. For requests using transport encoding, this should be + // the compressed size. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 3495 + AttributeHTTPRequestContentLength = "http.request_content_length" + // The size of the uncompressed request payload body after transport decoding. Not + // set if transport encoding not used. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 5493 + AttributeHTTPRequestContentLengthUncompressed = "http.request_content_length_uncompressed" + // The size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // Content-Length header. For requests using transport encoding, this should be + // the compressed size. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 3495 + AttributeHTTPResponseContentLength = "http.response_content_length" + // The size of the uncompressed response payload body after transport decoding. + // Not set if transport encoding not used. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 5493 + AttributeHTTPResponseContentLengthUncompressed = "http.response_content_length_uncompressed" +) + +const ( + // HTTP 1.0 + AttributeHTTPFlavorHTTP10 = "1.0" + // HTTP 1.1 + AttributeHTTPFlavorHTTP11 = "1.1" + // HTTP 2 + AttributeHTTPFlavorHTTP20 = "2.0" + // SPDY protocol + AttributeHTTPFlavorSPDY = "SPDY" + // QUIC protocol + AttributeHTTPFlavorQUIC = "QUIC" +) + +// Semantic Convention for HTTP Server +const ( + // The primary server name of the matched virtual host. This should be obtained + // via configuration. If no such configuration can be obtained, this attribute + // MUST NOT be set ( net.host.name should be used instead). 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'example.com' + // Note: http.url is usually not readily available on the server side but would + // have to be assembled in a cumbersome and sometimes lossy process from other + // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus + // preferred to supply the raw data that is available. + AttributeHTTPServerName = "http.server_name" + // The matched route (path template). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '/users/:userID?' + AttributeHTTPRoute = "http.route" + // The IP address of the original client behind all proxies, if known (e.g. from + // X-Forwarded-For). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '83.164.160.102' + // Note: This is not necessarily the same as net.peer.ip, which would identify the + // network-level peer, which may be a proxy. + AttributeHTTPClientIP = "http.client_ip" +) + +// Attributes that exist for multiple DynamoDB request types. +const ( + // The keys in the RequestItems object field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'Users', 'Cats' + AttributeAWSDynamoDBTableNames = "aws.dynamodb.table_names" + // The JSON-serialized value of each item in the ConsumedCapacity response field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { + // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }' + AttributeAWSDynamoDBConsumedCapacity = "aws.dynamodb.consumed_capacity" + // The JSON-serialized value of the ItemCollectionMetrics response field. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' + AttributeAWSDynamoDBItemCollectionMetrics = "aws.dynamodb.item_collection_metrics" + // The value of the ProvisionedThroughput.ReadCapacityUnits request parameter. + // + // Type: double + // Required: No + // Stability: stable + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedReadCapacity = "aws.dynamodb.provisioned_read_capacity" + // The value of the ProvisionedThroughput.WriteCapacityUnits request parameter. + // + // Type: double + // Required: No + // Stability: stable + // Examples: 1.0, 2.0 + AttributeAWSDynamoDBProvisionedWriteCapacity = "aws.dynamodb.provisioned_write_capacity" + // The value of the ConsistentRead request parameter. + // + // Type: boolean + // Required: No + // Stability: stable + AttributeAWSDynamoDBConsistentRead = "aws.dynamodb.consistent_read" + // The value of the ProjectionExpression request parameter. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, + // ProductReviews' + AttributeAWSDynamoDBProjection = "aws.dynamodb.projection" + // The value of the Limit request parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 10 + AttributeAWSDynamoDBLimit = "aws.dynamodb.limit" + // The value of the AttributesToGet request parameter. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'lives', 'id' + AttributeAWSDynamoDBAttributesToGet = "aws.dynamodb.attributes_to_get" + // The value of the IndexName request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'name_to_group' + AttributeAWSDynamoDBIndexName = "aws.dynamodb.index_name" + // The value of the Select request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AttributeAWSDynamoDBSelect = "aws.dynamodb.select" +) + +// DynamoDB.CreateTable +const ( + // The JSON-serialized value of each item of the GlobalSecondaryIndexes request + // field + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": + // number, "WriteCapacityUnits": number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexes = "aws.dynamodb.global_secondary_indexes" + // The JSON-serialized value of each item of the LocalSecondaryIndexes request + // field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }' + AttributeAWSDynamoDBLocalSecondaryIndexes = "aws.dynamodb.local_secondary_indexes" +) + +// DynamoDB.ListTables +const ( + // The value of the ExclusiveStartTableName request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Users', 'CatsTable' + AttributeAWSDynamoDBExclusiveStartTable = "aws.dynamodb.exclusive_start_table" + // The number of items in the TableNames response parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 20 + AttributeAWSDynamoDBTableCount = "aws.dynamodb.table_count" +) + +// DynamoDB.Query +const ( + // The value of the ScanIndexForward request parameter. + // + // Type: boolean + // Required: No + // Stability: stable + AttributeAWSDynamoDBScanForward = "aws.dynamodb.scan_forward" +) + +// DynamoDB.Scan +const ( + // The value of the Segment request parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 10 + AttributeAWSDynamoDBSegment = "aws.dynamodb.segment" + // The value of the TotalSegments request parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 100 + AttributeAWSDynamoDBTotalSegments = "aws.dynamodb.total_segments" + // The value of the Count response parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 10 + AttributeAWSDynamoDBCount = "aws.dynamodb.count" + // The value of the ScannedCount response parameter.
+ // + // Type: int + // Required: No + // Stability: stable + // Examples: 50 + AttributeAWSDynamoDBScannedCount = "aws.dynamodb.scanned_count" +) + +// DynamoDB.UpdateTable +const ( + // The JSON-serialized value of each item in the AttributeDefinitions request + // field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AttributeAWSDynamoDBAttributeDefinitions = "aws.dynamodb.attribute_definitions" + // The JSON-serialized value of each item in the GlobalSecondaryIndexUpdates + // request field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }' + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates = "aws.dynamodb.global_secondary_index_updates" +) + +// This document defines the attributes used in messaging systems. +const ( + // A string identifying the messaging system. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'activemq', 'AmazonSQS' + AttributeMessagingSystem = "messaging.system" + // The message destination name. This might be equal to the span name but is + // required nevertheless. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + AttributeMessagingDestination = "messaging.destination" + // The kind of message destination + // + // Type: Enum + // Required: Required only if the message destination is either a `queue` or + // `topic`. + // Stability: stable + AttributeMessagingDestinationKind = "messaging.destination_kind" + // A boolean that is true if the message destination is temporary. + // + // Type: boolean + // Required: If missing, it is assumed to be false. + // Stability: stable + AttributeMessagingTempDestination = "messaging.temp_destination" + // The name of the transport protocol. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'AMQP', 'MQTT' + AttributeMessagingProtocol = "messaging.protocol" + // The version of the transport protocol. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.9.1' + AttributeMessagingProtocolVersion = "messaging.protocol_version" + // Connection string. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'tibjmsnaming://localhost:7222', + // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' + AttributeMessagingURL = "messaging.url" + // A value used by the messaging system as an identifier for the message, + // represented as a string. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + AttributeMessagingMessageID = "messaging.message_id" + // The conversation ID identifying the conversation to which the message belongs, + // represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'MyConversationID' + AttributeMessagingConversationID = "messaging.conversation_id" + // The (uncompressed) size of the message payload in bytes. Also use this + // attribute if it is unknown whether the compressed or uncompressed payload size + // is reported.
+ // + // Type: int + // Required: No + // Stability: stable + // Examples: 2738 + AttributeMessagingMessagePayloadSizeBytes = "messaging.message_payload_size_bytes" + // The compressed size of the message payload in bytes. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 2048 + AttributeMessagingMessagePayloadCompressedSizeBytes = "messaging.message_payload_compressed_size_bytes" +) + +const ( + // A message sent to a queue + AttributeMessagingDestinationKindQueue = "queue" + // A message sent to a topic + AttributeMessagingDestinationKindTopic = "topic" +) + +// Semantic convention for a consumer of messages received from a messaging system +const ( + // A string identifying the kind of message consumption as defined in the + // Operation names section above. If the operation is "send", this + // attribute MUST NOT be set, since the operation can be inferred from the span + // kind in that case. + // + // Type: Enum + // Required: No + // Stability: stable + AttributeMessagingOperation = "messaging.operation" +) + +const ( + // receive + AttributeMessagingOperationReceive = "receive" + // process + AttributeMessagingOperationProcess = "process" +) + +// Attributes for RabbitMQ +const ( + // RabbitMQ message routing key. + // + // Type: string + // Required: Unless it is empty. + // Stability: stable + // Examples: 'myKey' + AttributeMessagingRabbitmqRoutingKey = "messaging.rabbitmq.routing_key" +) + +// Attributes for Apache Kafka +const ( + // Message keys in Kafka are used for grouping alike messages to ensure they're + // processed on the same partition. They differ from messaging.message_id in that + // they're not unique. If the key is null, the attribute MUST NOT be set. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'myKey' + // Note: If the key type is not string, its string representation has to be + // supplied for the attribute. If the key has no unambiguous, canonical string + // form, don't include its value. + AttributeMessagingKafkaMessageKey = "messaging.kafka.message_key" + // Name of the Kafka Consumer Group that is handling the message. Only applies to + // consumers, not producers. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'my-group' + AttributeMessagingKafkaConsumerGroup = "messaging.kafka.consumer_group" + // Client ID for the Consumer or Producer that is handling the message. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'client-5' + AttributeMessagingKafkaClientID = "messaging.kafka.client_id" + // Partition the message is sent to. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 2 + AttributeMessagingKafkaPartition = "messaging.kafka.partition" + // A boolean that is true if the message is a tombstone. + // + // Type: boolean + // Required: If missing, it is assumed to be false. + // Stability: stable + AttributeMessagingKafkaTombstone = "messaging.kafka.tombstone" +) + +// This document defines semantic conventions for remote procedure calls. +const ( + // A string identifying the remoting system. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'grpc', 'java_rmi', 'wcf' + AttributeRPCSystem = "rpc.system" + // The full (logical) name of the service being called, including its package + // name, if applicable.
+ // + // Type: string + // Required: No, but recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing class. + // The code.namespace attribute may be used to store the latter (despite the + // attribute name, it may include a class name; e.g., class with method actually + // executing the call on the server side, RPC client stub class on the client + // side). + AttributeRPCService = "rpc.service" + // The name of the (logical) method being called, must be equal to the $method + // part in the span name. + // + // Type: string + // Required: No, but recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The code.function attribute may be used to store the latter + // (e.g., method actually executing the call on the server side, RPC client stub + // method on the client side). + AttributeRPCMethod = "rpc.method" +) + +// Tech-specific attributes for gRPC. +const ( + // The numeric status code of the gRPC request. + // + // Type: Enum + // Required: Always + // Stability: stable + AttributeRPCGRPCStatusCode = "rpc.grpc.status_code" +) + +const ( + // OK + AttributeRPCGRPCStatusCodeOk = "0" + // CANCELLED + AttributeRPCGRPCStatusCodeCancelled = "1" + // UNKNOWN + AttributeRPCGRPCStatusCodeUnknown = "2" + // INVALID_ARGUMENT + AttributeRPCGRPCStatusCodeInvalidArgument = "3" + // DEADLINE_EXCEEDED + AttributeRPCGRPCStatusCodeDeadlineExceeded = "4" + // NOT_FOUND + AttributeRPCGRPCStatusCodeNotFound = "5" + // ALREADY_EXISTS + AttributeRPCGRPCStatusCodeAlreadyExists = "6" + // PERMISSION_DENIED + AttributeRPCGRPCStatusCodePermissionDenied = "7" + // RESOURCE_EXHAUSTED + AttributeRPCGRPCStatusCodeResourceExhausted = "8" + // FAILED_PRECONDITION + AttributeRPCGRPCStatusCodeFailedPrecondition = "9" + // ABORTED + AttributeRPCGRPCStatusCodeAborted = "10" + // OUT_OF_RANGE + AttributeRPCGRPCStatusCodeOutOfRange = "11" + // UNIMPLEMENTED + AttributeRPCGRPCStatusCodeUnimplemented = "12" + // INTERNAL + AttributeRPCGRPCStatusCodeInternal = "13" + // UNAVAILABLE + AttributeRPCGRPCStatusCodeUnavailable = "14" + // DATA_LOSS + AttributeRPCGRPCStatusCodeDataLoss = "15" + // UNAUTHENTICATED + AttributeRPCGRPCStatusCodeUnauthenticated = "16" +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // Protocol version as in jsonrpc property of request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. + // + // Type: string + // Required: If missing, it is assumed to be "1.0". + // Stability: stable + // Examples: '2.0', '1.0' + AttributeRPCJsonrpcVersion = "rpc.jsonrpc.version" + // id property of request or response. Since protocol allows id to be int, string, + // null or missing (for notifications), value is expected to be cast to string for + // simplicity. Use empty string in case of null value. Omit entirely if this is a + // notification. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '10', 'request-7', '' + AttributeRPCJsonrpcRequestID = "rpc.jsonrpc.request_id" + // error.code property of response if it is an error response. + // + // Type: int + // Required: If missing, response is assumed to be successful. 
+ // Stability: stable + // Examples: -32700, 100 + AttributeRPCJsonrpcErrorCode = "rpc.jsonrpc.error_code" + // error.message property of response if it is an error response. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Parse error', 'User already exists' + AttributeRPCJsonrpcErrorMessage = "rpc.jsonrpc.error_message" +) + +func GetTraceSemanticConventionAttributeNames() []string { + return []string{ + AttributeAWSLambdaInvokedARN, + AttributeDBSystem, + AttributeDBConnectionString, + AttributeDBUser, + AttributeDBJDBCDriverClassname, + AttributeDBName, + AttributeDBStatement, + AttributeDBOperation, + AttributeDBMSSQLInstanceName, + AttributeDBCassandraKeyspace, + AttributeDBCassandraPageSize, + AttributeDBCassandraConsistencyLevel, + AttributeDBCassandraTable, + AttributeDBCassandraIdempotence, + AttributeDBCassandraSpeculativeExecutionCount, + AttributeDBCassandraCoordinatorID, + AttributeDBCassandraCoordinatorDC, + AttributeDBHBaseNamespace, + AttributeDBRedisDBIndex, + AttributeDBMongoDBCollection, + AttributeDBSQLTable, + AttributeExceptionType, + AttributeExceptionMessage, + AttributeExceptionStacktrace, + AttributeExceptionEscaped, + AttributeFaaSTrigger, + AttributeFaaSExecution, + AttributeFaaSDocumentCollection, + AttributeFaaSDocumentOperation, + AttributeFaaSDocumentTime, + AttributeFaaSDocumentName, + AttributeFaaSTime, + AttributeFaaSCron, + AttributeFaaSColdstart, + AttributeFaaSInvokedName, + AttributeFaaSInvokedProvider, + AttributeFaaSInvokedRegion, + AttributeNetTransport, + AttributeNetPeerIP, + AttributeNetPeerPort, + AttributeNetPeerName, + AttributeNetHostIP, + AttributeNetHostPort, + AttributeNetHostName, + AttributeNetHostConnectionType, + AttributeNetHostConnectionSubtype, + AttributeNetHostCarrierName, + AttributeNetHostCarrierMcc, + AttributeNetHostCarrierMnc, + AttributeNetHostCarrierIcc, + AttributePeerService, + AttributeEnduserID, + AttributeEnduserRole, + AttributeEnduserScope, + AttributeThreadID, + AttributeThreadName, + AttributeCodeFunction, + AttributeCodeNamespace, + AttributeCodeFilepath, + AttributeCodeLineNumber, + AttributeHTTPMethod, + AttributeHTTPURL, + AttributeHTTPTarget, + AttributeHTTPHost, + AttributeHTTPScheme, + AttributeHTTPStatusCode, + AttributeHTTPFlavor, + AttributeHTTPUserAgent, + AttributeHTTPRequestContentLength, + AttributeHTTPRequestContentLengthUncompressed, + AttributeHTTPResponseContentLength, + AttributeHTTPResponseContentLengthUncompressed, + AttributeHTTPServerName, + AttributeHTTPRoute, + AttributeHTTPClientIP, + AttributeAWSDynamoDBTableNames, + AttributeAWSDynamoDBConsumedCapacity, + AttributeAWSDynamoDBItemCollectionMetrics, + AttributeAWSDynamoDBProvisionedReadCapacity, + AttributeAWSDynamoDBProvisionedWriteCapacity, + AttributeAWSDynamoDBConsistentRead, + AttributeAWSDynamoDBProjection, + AttributeAWSDynamoDBLimit, + AttributeAWSDynamoDBAttributesToGet, + AttributeAWSDynamoDBIndexName, + AttributeAWSDynamoDBSelect, + AttributeAWSDynamoDBGlobalSecondaryIndexes, + AttributeAWSDynamoDBLocalSecondaryIndexes, + AttributeAWSDynamoDBExclusiveStartTable, + AttributeAWSDynamoDBTableCount, + AttributeAWSDynamoDBScanForward, + AttributeAWSDynamoDBSegment, + AttributeAWSDynamoDBTotalSegments, + AttributeAWSDynamoDBCount, + AttributeAWSDynamoDBScannedCount, + AttributeAWSDynamoDBAttributeDefinitions, + AttributeAWSDynamoDBGlobalSecondaryIndexUpdates, + AttributeMessagingSystem, + AttributeMessagingDestination, + AttributeMessagingDestinationKind, + AttributeMessagingTempDestination, + 
AttributeMessagingProtocol, + AttributeMessagingProtocolVersion, + AttributeMessagingURL, + AttributeMessagingMessageID, + AttributeMessagingConversationID, + AttributeMessagingMessagePayloadSizeBytes, + AttributeMessagingMessagePayloadCompressedSizeBytes, + AttributeMessagingOperation, + AttributeMessagingRabbitmqRoutingKey, + AttributeMessagingKafkaMessageKey, + AttributeMessagingKafkaConsumerGroup, + AttributeMessagingKafkaClientID, + AttributeMessagingKafkaPartition, + AttributeMessagingKafkaTombstone, + AttributeRPCSystem, + AttributeRPCService, + AttributeRPCMethod, + AttributeRPCGRPCStatusCode, + AttributeRPCJsonrpcVersion, + AttributeRPCJsonrpcRequestID, + AttributeRPCJsonrpcErrorCode, + AttributeRPCJsonrpcErrorMessage, + } +} diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.6.1/nonstandard.go b/vendor/go.opentelemetry.io/collector/semconv/v1.6.1/nonstandard.go new file mode 100644 index 000000000..cea103bdf --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.6.1/nonstandard.go @@ -0,0 +1,11 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/collector/semconv/v1.6.1" + +const ( + OtelLibraryName = "otel.library.name" + OtelLibraryVersion = "otel.library.version" + OtelStatusCode = "otel.status_code" + OtelStatusDescription = "otel.status_description" +) diff --git a/vendor/go.opentelemetry.io/collector/semconv/v1.6.1/schema.go b/vendor/go.opentelemetry.io/collector/semconv/v1.6.1/schema.go new file mode 100644 index 000000000..589a94a05 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/semconv/v1.6.1/schema.go @@ -0,0 +1,9 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/collector/semconv/v1.6.1" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Conventions packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.6.1" diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 123365cae..b2fbe0784 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -219,7 +219,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http setAfterServeAttributes(span, bw.read, rww.written, rww.statusCode, bw.err, rww.err) // Add metrics - attributes := append(labeler.Get(), semconvutil.HTTPServerRequest(h.server, r)...) + attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...) 
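The semconv v1.6.1 package introduced above exposes the attribute names as plain string constants rather than typed keys, so any component can use them directly as map keys or attribute names. A minimal sketch of that usage; the `attrs` map below is only a stand-in for a span's attribute collection, not an API from the vendored package:

```go
package main

import (
	"fmt"

	semconv "go.opentelemetry.io/collector/semconv/v1.6.1"
)

func main() {
	// Stand-in for a span's attributes, keyed by the semantic-convention
	// names defined in the vendored package.
	attrs := map[string]string{
		semconv.AttributeMessagingSystem:      "kafka",
		semconv.AttributeMessagingDestination: "MyTopic",
		semconv.AttributeRPCGRPCStatusCode:    semconv.AttributeRPCGRPCStatusCodeOk,
	}

	if sys, ok := attrs[semconv.AttributeMessagingSystem]; ok {
		fmt.Println("messaging system:", sys)
	}

	// SchemaURL records which semantic-convention version these keys follow.
	fmt.Println("schema:", semconv.SchemaURL)
}
```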
if rww.statusCode > 0 { attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go index 9bab232e3..d3dede9eb 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go @@ -83,6 +83,30 @@ func HTTPServerRequest(server string, req *http.Request) []attribute.KeyValue { return hc.ServerRequest(server, req) } +// HTTPServerRequestMetrics returns metric attributes for an HTTP request received by a +// server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +// +// The following attributes are always returned: "http.method", "http.scheme", +// "http.flavor", "net.host.name". The following attributes are +// returned if they related values are defined in req: "net.host.port". +func HTTPServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { + return hc.ServerRequestMetrics(server, req) +} + // HTTPServerStatus returns a span status code and message for an HTTP status code // value returned by a server. Status codes in the 400-499 range are not // returned as errors. @@ -217,7 +241,7 @@ func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.method(req.Method)) - attrs = append(attrs, c.proto(req.Proto)) + attrs = append(attrs, c.flavor(req.Proto)) var u string if req.URL != nil { @@ -318,7 +342,7 @@ func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.K attrs = append(attrs, c.method(req.Method)) attrs = append(attrs, c.scheme(req.TLS != nil)) - attrs = append(attrs, c.proto(req.Proto)) + attrs = append(attrs, c.flavor(req.Proto)) attrs = append(attrs, c.NetConv.HostName(host)) if hostPort > 0 { @@ -349,6 +373,64 @@ func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.K return attrs } +// ServerRequestMetrics returns metric attributes for an HTTP request received +// by a server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. 
It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +// +// The following attributes are always returned: "http.method", "http.scheme", +// "http.flavor", "net.host.name". The following attributes are +// returned if they related values are defined in req: "net.host.port". +func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { + // TODO: This currently does not add the specification required + // `http.target` attribute. It has too high of a cardinality to safely be + // added. An alternate should be added, or this comment removed, when it is + // addressed by the specification. If it is ultimately decided to continue + // not including the attribute, the HTTPTargetKey field of the httpConv + // should be removed as well. + + n := 4 // Method, scheme, proto, and host name. + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. + host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + attrs := make([]attribute.KeyValue, 0, n) + + attrs = append(attrs, c.methodMetric(req.Method)) + attrs = append(attrs, c.scheme(req.TLS != nil)) + attrs = append(attrs, c.flavor(req.Proto)) + attrs = append(attrs, c.NetConv.HostName(host)) + + if hostPort > 0 { + attrs = append(attrs, c.NetConv.HostPort(hostPort)) + } + + return attrs +} + func (c *httpConv) method(method string) attribute.KeyValue { if method == "" { return c.HTTPMethodKey.String(http.MethodGet) @@ -356,6 +438,16 @@ func (c *httpConv) method(method string) attribute.KeyValue { return c.HTTPMethodKey.String(method) } +func (c *httpConv) methodMetric(method string) attribute.KeyValue { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return c.HTTPMethodKey.String(method) +} + func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive if https { return c.HTTPSchemeHTTPS @@ -363,7 +455,7 @@ func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive return c.HTTPSchemeHTTP } -func (c *httpConv) proto(proto string) attribute.KeyValue { +func (c *httpConv) flavor(proto string) attribute.KeyValue { switch proto { case "HTTP/1.0": return c.HTTPFlavorKey.String("1.0") diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 121c3d57c..6eace875c 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -16,7 +16,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. 
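To make the otelhttp change above concrete: span attributes continue to come from `HTTPServerRequest`, while the metrics path switches to `HTTPServerRequestMetrics`, which returns only the bounded set `http.method`, `http.scheme`, `http.flavor`, `net.host.name` (plus `net.host.port` when present) and reports nonstandard request methods as `_OTHER`. The sketch below only mirrors that normalization with a hypothetical `normalize` helper; it is not the library's own API:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// normalize mirrors the methodMetric logic shown in the hunk above:
// anything outside the nine standard HTTP methods is reported as
// "_OTHER" so the http.method metric dimension stays bounded.
func normalize(method string) string {
	method = strings.ToUpper(method)
	switch method {
	case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead,
		http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
		return method
	}
	return "_OTHER"
}

func main() {
	fmt.Println(normalize("get"))      // GET
	fmt.Println(normalize("PROPFIND")) // _OTHER
}
```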
func Version() string { - return "0.43.0" + return "0.45.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index aa6993762..f3355c852 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -13,6 +13,7 @@ go.work.sum gen/ +/example/dice/dice /example/fib/fib /example/fib/traces.txt /example/jaeger/jaeger diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 61782fbf0..6e8eeec00 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -76,11 +76,6 @@ linters-settings: otlp-internal: files: - "!**/exporters/otlp/internal/**/*.go" - # TODO: remove the following when otlpmetric/internal is removed. - - "!**/exporters/otlp/otlpmetric/internal/oconf/envconfig.go" - - "!**/exporters/otlp/otlpmetric/internal/oconf/options.go" - - "!**/exporters/otlp/otlpmetric/internal/oconf/options_test.go" - - "!**/exporters/otlp/otlpmetric/internal/otest/client_test.go" deny: - pkg: "go.opentelemetry.io/otel/exporters/otlp/internal" desc: Do not use cross-module internal packages. diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 7aa5c8051..3e5c35b5d 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,71 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.19.0/0.42.0/0.0.7] 2023-09-28 + +This release contains the first stable release of the OpenTelemetry Go [metric SDK]. +Our project stability guarantees now apply to the `go.opentelemetry.io/otel/sdk/metric` package. +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +### Added + +- Add the "Roll the dice" getting started application example in `go.opentelemetry.io/otel/example/dice`. (#4539) +- The `WithWriter` and `WithPrettyPrint` options to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set a custom `io.Writer`, and allow displaying the output in human-readable JSON. (#4507) + +### Changed + +- Allow '/' characters in metric instrument names. (#4501) +- The exporter in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` does not prettify its output by default anymore. (#4507) +- Upgrade `gopkg.io/yaml` from `v2` to `v3` in `go.opentelemetry.io/otel/schema`. (#4535) + +### Fixed + +- In `go.opentelemetry.op/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499) + +### Removed + +- Remove `"go.opentelemetry.io/otel/bridge/opencensus".NewMetricExporter`, which is replaced by `NewMetricProducer`. (#4566) + +## [1.19.0-rc.1/0.42.0-rc.1] 2023-09-14 + +This is a release candidate for the v1.19.0/v0.42.0 release. +That release is expected to include the `v1` release of the OpenTelemetry Go metric SDK and will provide stability guarantees of that SDK. +See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. + +### Changed + +- Allow '/' characters in metric instrument names. (#4501) + +### Fixed + +- In `go.opentelemetry.op/otel/exporters/prometheus`, don't try to create the prometheus metric on every `Collect` if we know the scope is invalid. 
(#4499) + +## [1.18.0/0.41.0/0.0.6] 2023-09-12 + +This release drops the compatibility guarantee of [Go 1.19]. + +### Added + +- Add `WithProducer` option in `go.opentelemetry.op/otel/exporters/prometheus` to restore the ability to register producers on the prometheus exporter's manual reader. (#4473) +- Add `IgnoreValue` option in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest` to allow ignoring values when comparing metrics. (#4447) + +### Changed + +- Use a `TestingT` interface instead of `*testing.T` struct in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#4483) + +### Deprecated + +- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` was deprecated in `v0.35.0` (#3541). + The deprecation notice format for the function has been corrected to trigger Go documentation and build tooling. (#4470) + +### Removed + +- Removed the deprecated `go.opentelemetry.io/otel/exporters/jaeger` package. (#4467) +- Removed the deprecated `go.opentelemetry.io/otel/example/jaeger` package. (#4467) +- Removed the deprecated `go.opentelemetry.io/otel/sdk/metric/aggregation` package. (#4468) +- Removed the deprecated internal packages in `go.opentelemetry.io/otel/exporters/otlp` and its sub-packages. (#4469) +- Dropped guaranteed support for versions of Go less than 1.20. (#4481) + ## [1.17.0/0.40.0/0.0.5] 2023-08-28 ### Added @@ -2591,7 +2656,10 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. -[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.17.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.19.0...HEAD +[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0 +[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1 +[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0 [1.17.0/0.40.0/0.0.5]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.17.0 [1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0 [1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1 @@ -2663,5 +2731,7 @@ It contains api and sdk for trace and meter. [Go 1.20]: https://go.dev/doc/go1.20 [Go 1.19]: https://go.dev/doc/go1.19 [Go 1.18]: https://go.dev/doc/go1.18 +[Go 1.19]: https://go.dev/doc/go1.19 [metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric +[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index c996d227b..5c311706b 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -210,7 +210,7 @@ go-mod-tidy/%: DIR=$* go-mod-tidy/%: | crosslink @echo "$(GO) mod tidy in $(DIR)" \ && cd $(DIR) \ - && $(GO) mod tidy -compat=1.19 + && $(GO) mod tidy -compat=1.20 .PHONY: lint-modules lint-modules: go-mod-tidy diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 4e5531f30..634326ef8 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -55,19 +55,14 @@ Currently, this project supports the following environments. 
|---------|------------|--------------| | Ubuntu | 1.21 | amd64 | | Ubuntu | 1.20 | amd64 | -| Ubuntu | 1.19 | amd64 | | Ubuntu | 1.21 | 386 | | Ubuntu | 1.20 | 386 | -| Ubuntu | 1.19 | 386 | | MacOS | 1.21 | amd64 | | MacOS | 1.20 | amd64 | -| MacOS | 1.19 | amd64 | | Windows | 1.21 | amd64 | | Windows | 1.20 | amd64 | -| Windows | 1.19 | amd64 | | Windows | 1.21 | 386 | | Windows | 1.20 | 386 | -| Windows | 1.19 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt index 407f17489..ddff45468 100644 --- a/vendor/go.opentelemetry.io/otel/requirements.txt +++ b/vendor/go.opentelemetry.io/otel/requirements.txt @@ -1 +1 @@ -codespell==2.2.4 +codespell==2.2.5 diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index 3bce1b1e4..ad64e1996 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.17.0" + return "1.19.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 94f1c919e..7d2127692 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -14,19 +14,17 @@ module-sets: stable-v1: - version: v1.17.0 + version: v1.19.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opentracing - go.opentelemetry.io/otel/bridge/opentracing/test + - go.opentelemetry.io/otel/example/dice - go.opentelemetry.io/otel/example/fib - - go.opentelemetry.io/otel/example/jaeger - go.opentelemetry.io/otel/example/namedtracer - go.opentelemetry.io/otel/example/otel-collector - go.opentelemetry.io/otel/example/passthrough - go.opentelemetry.io/otel/example/zipkin - - go.opentelemetry.io/otel/exporters/jaeger - - go.opentelemetry.io/otel/exporters/otlp/internal/retry - go.opentelemetry.io/otel/exporters/otlp/otlptrace - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp @@ -34,23 +32,23 @@ module-sets: - go.opentelemetry.io/otel/exporters/zipkin - go.opentelemetry.io/otel/metric - go.opentelemetry.io/otel/sdk + - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.40.0 + version: v0.42.0 modules: + - go.opentelemetry.io/otel/bridge/opencensus + - go.opentelemetry.io/otel/bridge/opencensus/test - go.opentelemetry.io/otel/example/opencensus - go.opentelemetry.io/otel/example/prometheus + - go.opentelemetry.io/otel/example/view - go.opentelemetry.io/otel/exporters/otlp/otlpmetric - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/prometheus - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric - - go.opentelemetry.io/otel/sdk/metric - - go.opentelemetry.io/otel/bridge/opencensus - - go.opentelemetry.io/otel/bridge/opencensus/test - - go.opentelemetry.io/otel/example/view experimental-schema: - version: v0.0.5 + version: v0.0.7 modules: - go.opentelemetry.io/otel/schema excluded-modules: diff --git a/vendor/go.uber.org/goleak/.golangci.yml b/vendor/go.uber.org/goleak/.golangci.yml new file 
mode 100644 index 000000000..f84e6da8b --- /dev/null +++ b/vendor/go.uber.org/goleak/.golangci.yml @@ -0,0 +1,28 @@ +output: + # Make output more digestible with quickfix in vim/emacs/etc. + sort-results: true + print-issued-lines: false + +linters: + enable: + - gofumpt + - nolintlint + - revive + +linters-settings: + govet: + # These govet checks are disabled by default, but they're useful. + enable: + - niliness + - reflectvaluecompare + - sortslice + - unusedwrite + +issues: + # Print all issues reported by all linters. + max-issues-per-linter: 0 + max-same-issues: 0 + + # Don't ignore some of the issues that golangci-lint considers okay. + # This includes documenting all exported entities. + exclude-use-default: false diff --git a/vendor/go.uber.org/goleak/CHANGELOG.md b/vendor/go.uber.org/goleak/CHANGELOG.md index 530f0a573..5cd3f88a5 100644 --- a/vendor/go.uber.org/goleak/CHANGELOG.md +++ b/vendor/go.uber.org/goleak/CHANGELOG.md @@ -4,6 +4,21 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). +## [1.3.0] +### Fixed +- Built-in ignores now match function names more accurately. + They will no longer ignore stacks because of file names + that look similar to function names. (#112) +### Added +- Add an `IgnoreAnyFunction` option to ignore stack traces + that have the provided function anywhere in the stack. (#113) +- Ignore `testing.runFuzzing` and `testing.runFuzzTests` alongside + other already-ignored test functions (`testing.RunTests`, etc). (#105) +### Changed +- Miscellaneous CI-related fixes. (#103, #108, #114) + +[1.3.0]: https://github.com/uber-go/goleak/compare/v1.2.1...v1.3.0 + ## [1.2.1] ### Changed - Drop golang/x/lint dependency. @@ -56,4 +71,4 @@ Thanks to @denis-tingajkin for their contributions to this release. ## [0.10.0] - Initial release. -[0.10.0]: https://github.com/uber-go/goleak/compare/v0.10.0...HEAD \ No newline at end of file +[0.10.0]: https://github.com/uber-go/goleak/compare/v0.10.0...HEAD diff --git a/vendor/go.uber.org/goleak/Makefile b/vendor/go.uber.org/goleak/Makefile index 8dbf72265..eb7154af3 100644 --- a/vendor/go.uber.org/goleak/Makefile +++ b/vendor/go.uber.org/goleak/Makefile @@ -1,19 +1,26 @@ -export GOBIN ?= $(shell pwd)/bin +# Directory containing the Makefile. +PROJECT_ROOT = $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) -REVIVE = $(GOBIN)/revive +export GOBIN = $(PROJECT_ROOT)/bin +export PATH := $(GOBIN):$(PATH) -GO_FILES := $(shell \ - find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ - -o -name '*.go' -print | cut -b3-) +GO_FILES = $(shell find . \ + -path '*/.*' -prune -o \ + '(' -type f -a -name '*.go' ')' -print) + +# Additional test flags. +TEST_FLAGS ?= + +.PHONY: all +all: lint build test + +.PHONY: lint +lint: golangci-lint tidy-lint .PHONY: build build: go build ./... -.PHONY: install -install: - go mod download - .PHONY: test test: go test -v -race ./... @@ -24,18 +31,15 @@ cover: go test -race -coverprofile=cover.out -coverpkg=./... ./... go tool cover -html=cover.out -o cover.html -$(REVIVE): - cd tools && go install github.com/mgechev/revive +.PHONY: golangci-lint +golangci-lint: + golangci-lint run -.PHONY: lint -lint: $(REVIVE) - @rm -rf lint.log - @echo "Checking formatting..." - @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log - @echo "Checking vet..." - @go vet ./... 2>&1 | tee -a lint.log - @echo "Checking lint..." 
- @$(REVIVE) -set_exit_status ./... 2>&1 | tee -a lint.log - @echo "Checking for unresolved FIXMEs..." - @git grep -i fixme | grep -v -e '^vendor/' -e '^Makefile' | tee -a lint.log - @[ ! -s lint.log ] +.PHONY: tidy +tidy: + go mod tidy + +.PHONY: tidy-lint +tidy-lint: + go mod tidy + git diff --exit-code -- go.mod go.sum diff --git a/vendor/go.uber.org/goleak/README.md b/vendor/go.uber.org/goleak/README.md index a545b5e77..de3d7d51d 100644 --- a/vendor/go.uber.org/goleak/README.md +++ b/vendor/go.uber.org/goleak/README.md @@ -67,8 +67,8 @@ No breaking changes will be made to exported APIs before 2.0. [doc-img]: https://godoc.org/go.uber.org/goleak?status.svg [doc]: https://godoc.org/go.uber.org/goleak -[ci-img]: https://github.com/uber-go/goleak/actions/workflows/go.yml/badge.svg -[ci]: https://github.com/uber-go/goleak/actions/workflows/go.yml +[ci-img]: https://github.com/uber-go/goleak/actions/workflows/ci.yml/badge.svg +[ci]: https://github.com/uber-go/goleak/actions/workflows/ci.yml [cov-img]: https://codecov.io/gh/uber-go/goleak/branch/master/graph/badge.svg [cov]: https://codecov.io/gh/uber-go/goleak [release]: https://go.dev/doc/devel/release#policy diff --git a/vendor/go.uber.org/goleak/glide.yaml b/vendor/go.uber.org/goleak/glide.yaml deleted file mode 100644 index c6e7a00a0..000000000 --- a/vendor/go.uber.org/goleak/glide.yaml +++ /dev/null @@ -1,8 +0,0 @@ -package: go.uber.org/goleak -import: [] -testImport: -- package: github.com/stretchr/testify - version: ^1.1.4 - subpackages: - - assert - - require diff --git a/vendor/go.uber.org/goleak/internal/stack/scan.go b/vendor/go.uber.org/goleak/internal/stack/scan.go new file mode 100644 index 000000000..4b7ac8423 --- /dev/null +++ b/vendor/go.uber.org/goleak/internal/stack/scan.go @@ -0,0 +1,56 @@ +// Copyright (c) 2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package stack + +import ( + "bufio" + "io" +) + +// scanner provides a bufio.Scanner the ability to Unscan, +// which allows the current token to be read again +// after the next Scan. +type scanner struct { + *bufio.Scanner + + unscanned bool +} + +func newScanner(r io.Reader) *scanner { + return &scanner{Scanner: bufio.NewScanner(r)} +} + +func (s *scanner) Scan() bool { + if s.unscanned { + s.unscanned = false + return true + } + return s.Scanner.Scan() +} + +// Unscan stops the scanner from advancing its position +// for the next Scan. 
+// +// Bytes and Text will return the same token after next Scan +// that they do right now. +func (s *scanner) Unscan() { + s.unscanned = true +} diff --git a/vendor/go.uber.org/goleak/internal/stack/stacks.go b/vendor/go.uber.org/goleak/internal/stack/stacks.go index 94f82e4c0..241a9b844 100644 --- a/vendor/go.uber.org/goleak/internal/stack/stacks.go +++ b/vendor/go.uber.org/goleak/internal/stack/stacks.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Uber Technologies, Inc. +// Copyright (c) 2017-2023 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -21,8 +21,8 @@ package stack import ( - "bufio" "bytes" + "errors" "fmt" "io" "runtime" @@ -34,10 +34,17 @@ const _defaultBufferSize = 64 * 1024 // 64 KiB // Stack represents a single Goroutine's stack. type Stack struct { - id int - state string + id int + state string // e.g. 'running', 'chan receive' + + // The first function on the stack. firstFunction string - fullStack *bytes.Buffer + + // A set of all functions in the stack, + allFunctions map[string]struct{} + + // Full, raw stack trace. + fullStack string } // ID returns the goroutine ID. @@ -52,7 +59,7 @@ func (s Stack) State() string { // Full returns the full stack trace for this goroutine. func (s Stack) Full() string { - return s.fullStack.String() + return s.fullStack } // FirstFunction returns the name of the first function on the stack. @@ -60,6 +67,13 @@ func (s Stack) FirstFunction() string { return s.firstFunction } +// HasFunction reports whether the stack has the given function +// anywhere in it. +func (s Stack) HasFunction(name string) bool { + _, ok := s.allFunctions[name] + return ok +} + func (s Stack) String() string { return fmt.Sprintf( "Goroutine %v in state %v, with %v on top of the stack:\n%s", @@ -67,47 +81,149 @@ func (s Stack) String() string { } func getStacks(all bool) []Stack { - var stacks []Stack - - var curStack *Stack - stackReader := bufio.NewReader(bytes.NewReader(getStackBuffer(all))) - for { - line, err := stackReader.ReadString('\n') - if err == io.EOF { - break - } - if err != nil { - // We're reading using bytes.NewReader which should never fail. - panic("bufio.NewReader failed on a fixed string") - } - - // If we see the goroutine header, start a new stack. - isFirstLine := false - if strings.HasPrefix(line, "goroutine ") { - // flush any previous stack - if curStack != nil { - stacks = append(stacks, *curStack) - } - id, goState := parseGoStackHeader(line) - curStack = &Stack{ - id: id, - state: goState, - fullStack: &bytes.Buffer{}, - } - isFirstLine = true - } - curStack.fullStack.WriteString(line) - if !isFirstLine && curStack.firstFunction == "" { - curStack.firstFunction = parseFirstFunc(line) - } - } - - if curStack != nil { - stacks = append(stacks, *curStack) + trace := getStackBuffer(all) + stacks, err := newStackParser(bytes.NewReader(trace)).Parse() + if err != nil { + // Well-formed stack traces should never fail to parse. + // If they do, it's a bug in this package. + // Panic so we can fix it. 
+ panic(fmt.Sprintf("Failed to parse stack trace: %v\n%s", err, trace)) } return stacks } +type stackParser struct { + scan *scanner + stacks []Stack + errors []error +} + +func newStackParser(r io.Reader) *stackParser { + return &stackParser{ + scan: newScanner(r), + } +} + +func (p *stackParser) Parse() ([]Stack, error) { + for p.scan.Scan() { + line := p.scan.Text() + + // If we see the goroutine header, start a new stack. + if strings.HasPrefix(line, "goroutine ") { + stack, err := p.parseStack(line) + if err != nil { + p.errors = append(p.errors, err) + continue + } + p.stacks = append(p.stacks, stack) + } + } + + p.errors = append(p.errors, p.scan.Err()) + return p.stacks, errors.Join(p.errors...) +} + +// parseStack parses a single stack trace from the given scanner. +// line is the first line of the stack trace, which should look like: +// +// goroutine 123 [runnable]: +func (p *stackParser) parseStack(line string) (Stack, error) { + id, state, err := parseGoStackHeader(line) + if err != nil { + return Stack{}, fmt.Errorf("parse header: %w", err) + } + + // Read the rest of the stack trace. + var ( + firstFunction string + fullStack bytes.Buffer + ) + funcs := make(map[string]struct{}) + for p.scan.Scan() { + line := p.scan.Text() + if strings.HasPrefix(line, "goroutine ") { + // If we see the goroutine header, + // it's the end of this stack. + // Unscan so the next Scan sees the same line. + p.scan.Unscan() + break + } + + fullStack.WriteString(line) + fullStack.WriteByte('\n') // scanner trims the newline + + if len(line) == 0 { + // Empty line usually marks the end of the stack + // but we don't want to have to rely on that. + // Just skip it. + continue + } + + funcName, creator, err := parseFuncName(line) + if err != nil { + return Stack{}, fmt.Errorf("parse function: %w", err) + } + if !creator { + // A function is part of a goroutine's stack + // only if it's not a "created by" function. + // + // The creator function is part of a different stack. + // We don't care about it right now. + funcs[funcName] = struct{}{} + if firstFunction == "" { + firstFunction = funcName + } + + } + + // The function name followed by a line in the form: + // + // example.com/path/to/package/file.go:123 +0x123 + // + // We don't care about the position so we can skip this line. + if p.scan.Scan() { + // Be defensive: + // Skip the line only if it starts with a tab. + bs := p.scan.Bytes() + if len(bs) > 0 && bs[0] == '\t' { + fullStack.Write(bs) + fullStack.WriteByte('\n') + } else { + // Put it back and let the next iteration handle it + // if it doesn't start with a tab. + p.scan.Unscan() + } + } + + if creator { + // The "created by" line is the last line of the stack. + // We can stop parsing now. + // + // Note that if tracebackancestors=N is set, + // there may be more a traceback of the creator function + // following the "created by" line, + // but it should not be considered part of this stack. + // e.g., + // + // created by testing.(*T).Run in goroutine 1 + // /usr/lib/go/src/testing/testing.go:1648 +0x3ad + // [originating from goroutine 1]: + // testing.(*T).Run(...) + // /usr/lib/go/src/testing/testing.go:1649 +0x3ad + // + break + } + } + + return Stack{ + id: id, + state: state, + firstFunction: firstFunction, + allFunctions: funcs, + fullStack: fullStack.String(), + }, nil +} + // All returns the stacks for all running goroutines. 
func All() []Stack { return getStacks(true) @@ -127,29 +243,56 @@ func getStackBuffer(all bool) []byte { } } -func parseFirstFunc(line string) string { - line = strings.TrimSpace(line) - if idx := strings.LastIndex(line, "("); idx > 0 { - return line[:idx] +// Parses a single function from the given line. +// The line is in one of these formats: +// +// example.com/path/to/package.funcName(args...) +// example.com/path/to/package.(*typeName).funcName(args...) +// created by example.com/path/to/package.funcName +// created by example.com/path/to/package.funcName in goroutine [...] +// +// Also reports whether the line was a "created by" line. +func parseFuncName(line string) (name string, creator bool, err error) { + if after, ok := strings.CutPrefix(line, "created by "); ok { + // The function name is the part after "created by " + // and before " in goroutine [...]". + idx := strings.Index(after, " in goroutine") + if idx >= 0 { + after = after[:idx] + } + name = after + creator = true + } else if idx := strings.LastIndexByte(line, '('); idx >= 0 { + // The function name is the part before the last '('. + name = line[:idx] } - panic(fmt.Sprintf("function calls missing parents: %q", line)) + + if name == "" { + return "", false, fmt.Errorf("no function found: %q", line) + } + + return name, creator, nil } // parseGoStackHeader parses a stack header that looks like: // goroutine 643 [runnable]:\n // And returns the goroutine ID, and the state. -func parseGoStackHeader(line string) (goroutineID int, state string) { - line = strings.TrimSuffix(line, ":\n") +func parseGoStackHeader(line string) (goroutineID int, state string, err error) { + // The scanner will have already trimmed the "\n", + // but we'll guard against it just in case. + // + // Trimming them separately makes them both optional. + line = strings.TrimSuffix(strings.TrimSuffix(line, ":"), "\n") parts := strings.SplitN(line, " ", 3) if len(parts) != 3 { - panic(fmt.Sprintf("unexpected stack header format: %q", line)) + return 0, "", fmt.Errorf("unexpected format: %q", line) } id, err := strconv.Atoi(parts[1]) if err != nil { - panic(fmt.Sprintf("failed to parse goroutine ID: %v in line %q", parts[1], line)) + return 0, "", fmt.Errorf("bad goroutine ID %q in line %q", parts[1], line) } state = strings.TrimSuffix(strings.TrimPrefix(parts[2], "["), "]") - return id, state + return id, state, nil } diff --git a/vendor/go.uber.org/goleak/leaks.go b/vendor/go.uber.org/goleak/leaks.go index ee122b746..cc206f181 100644 --- a/vendor/go.uber.org/goleak/leaks.go +++ b/vendor/go.uber.org/goleak/leaks.go @@ -82,6 +82,12 @@ type testHelper interface { // tests by doing: // // defer VerifyNone(t) +// +// VerifyNone is currently incompatible with t.Parallel because it cannot +// associate specific goroutines with specific tests. Thus, non-leaking +// goroutines from other tests running in parallel could fail this check. +// If you need to run tests in parallel, use [VerifyTestMain] instead, +// which will verify that no leaking goroutines exist after ALL tests finish. func VerifyNone(t TestingT, options ...Option) { opts := buildOpts(options...) var cleanup func(int) diff --git a/vendor/go.uber.org/goleak/options.go b/vendor/go.uber.org/goleak/options.go index d2d473b71..53fc0a1de 100644 --- a/vendor/go.uber.org/goleak/options.go +++ b/vendor/go.uber.org/goleak/options.go @@ -1,4 +1,4 @@ -// Copyright (c) 2017 Uber Technologies, Inc. +// Copyright (c) 2017-2023 Uber Technologies, Inc. 
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -67,6 +67,22 @@ func IgnoreTopFunction(f string) Option { }) } +// IgnoreAnyFunction ignores goroutines where the specified function +// is present anywhere in the stack. +// +// The function name must be fully qualified, e.g., +// +// go.uber.org/goleak.IgnoreAnyFunction +// +// For methods, the fully qualified form looks like: +// +// go.uber.org/goleak.(*MyType).MyMethod +func IgnoreAnyFunction(f string) Option { + return addFilter(func(s stack.Stack) bool { + return s.HasFunction(f) + }) +} + // Cleanup sets up a cleanup function that will be executed at the // end of the leak check. // When passed to [VerifyTestMain], the exit code passed to cleanupFunc @@ -151,8 +167,12 @@ func isTestStack(s stack.Stack) bool { // Since go1.7, a separate goroutine is started to wait for signals. // T.Parallel is for parallel tests, which are blocked until all serial // tests have run with T.Parallel at the top of the stack. + // testing.runFuzzTests is for fuzz testing, it's blocked until the test + // function with all seed corpus have run. + // testing.runFuzzing is for fuzz testing, it's blocked until a failing + // input is found. switch s.FirstFunction() { - case "testing.RunTests", "testing.(*T).Run", "testing.(*T).Parallel": + case "testing.RunTests", "testing.(*T).Run", "testing.(*T).Parallel", "testing.runFuzzing", "testing.runFuzzTests": // In pre1.7 and post-1.7, background goroutines started by the testing // package are blocked waiting on a channel. return strings.HasPrefix(s.State(), "chan receive") @@ -163,7 +183,7 @@ func isTestStack(s stack.Stack) bool { func isSyscallStack(s stack.Stack) bool { // Typically runs in the background when code uses CGo: // https://github.com/golang/go/issues/16714 - return s.FirstFunction() == "runtime.goexit" && strings.HasPrefix(s.State(), "syscall") + return s.HasFunction("runtime.goexit") && strings.HasPrefix(s.State(), "syscall") } func isStdLibStack(s stack.Stack) bool { @@ -174,5 +194,5 @@ func isStdLibStack(s stack.Stack) bool { } // Using signal.Notify will start a runtime goroutine. - return strings.Contains(s.Full(), "runtime.ensureSigM") + return s.HasFunction("runtime.ensureSigM") } diff --git a/vendor/go.uber.org/goleak/tracestack_new.go b/vendor/go.uber.org/goleak/tracestack_new.go index d12ffd840..4fc6cefce 100644 --- a/vendor/go.uber.org/goleak/tracestack_new.go +++ b/vendor/go.uber.org/goleak/tracestack_new.go @@ -1,4 +1,4 @@ -// Copyright (c) 2021 Uber Technologies, Inc. +// Copyright (c) 2021-2023 Uber Technologies, Inc. 
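For context on the goleak option changes above, a hedged usage sketch; the package path `example.com/mypkg` and the `backgroundWorker` function are hypothetical:

```go
package mypkg_test

import (
	"testing"

	"go.uber.org/goleak"
)

// TestMain is the parallel-safe place to check for leaks: it runs once,
// after every test in the package has finished (see the VerifyNone note above).
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m,
		// IgnoreAnyFunction skips goroutines that have this function
		// anywhere in their stack, not just on top.
		goleak.IgnoreAnyFunction("example.com/mypkg.backgroundWorker"),
	)
}
```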
// // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal @@ -23,12 +23,8 @@ package goleak -import ( - "strings" - - "go.uber.org/goleak/internal/stack" -) +import "go.uber.org/goleak/internal/stack" func isTraceStack(s stack.Stack) bool { - return strings.Contains(s.Full(), "runtime.ReadTrace") + return s.HasFunction("runtime.ReadTrace") } diff --git a/vendor/go.uber.org/multierr/.codecov.yml b/vendor/go.uber.org/multierr/.codecov.yml new file mode 100644 index 000000000..6d4d1be7b --- /dev/null +++ b/vendor/go.uber.org/multierr/.codecov.yml @@ -0,0 +1,15 @@ +coverage: + range: 80..100 + round: down + precision: 2 + + status: + project: # measuring the overall project coverage + default: # context, you can create multiple ones with custom titles + enabled: yes # must be yes|true to enable this status + target: 100 # specify the target coverage for each commit status + # option: "auto" (must increase from parent commit or pull request base) + # option: "X%" a static target percentage to hit + if_not_found: success # if parent is not found report status as success, error, or failure + if_ci_failed: error # if ci fails report status as success, error, or failure + diff --git a/vendor/go.uber.org/multierr/.gitignore b/vendor/go.uber.org/multierr/.gitignore new file mode 100644 index 000000000..b9a05e3da --- /dev/null +++ b/vendor/go.uber.org/multierr/.gitignore @@ -0,0 +1,4 @@ +/vendor +cover.html +cover.out +/bin diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md new file mode 100644 index 000000000..f8177b978 --- /dev/null +++ b/vendor/go.uber.org/multierr/CHANGELOG.md @@ -0,0 +1,95 @@ +Releases +======== + +v1.11.0 (2023-03-28) +==================== +- `Errors` now supports any error that implements multiple-error + interface. +- Add `Every` function to allow checking if all errors in the chain + satisfies `errors.Is` against the target error. + +v1.10.0 (2023-03-08) +==================== + +- Comply with Go 1.20's multiple-error interface. +- Drop Go 1.18 support. + Per the support policy, only Go 1.19 and 1.20 are supported now. +- Drop all non-test external dependencies. + +v1.9.0 (2022-12-12) +=================== + +- Add `AppendFunc` that allow passsing functions to similar to + `AppendInvoke`. + +- Bump up yaml.v3 dependency to 3.0.1. + +v1.8.0 (2022-02-28) +=================== + +- `Combine`: perform zero allocations when there are no errors. + + +v1.7.0 (2021-05-06) +=================== + +- Add `AppendInvoke` to append into errors from `defer` blocks. + + +v1.6.0 (2020-09-14) +=================== + +- Actually drop library dependency on development-time tooling. + + +v1.5.0 (2020-02-24) +=================== + +- Drop library dependency on development-time tooling. + + +v1.4.0 (2019-11-04) +=================== + +- Add `AppendInto` function to more ergonomically build errors inside a + loop. + + +v1.3.0 (2019-10-29) +=================== + +- Switch to Go modules. + + +v1.2.0 (2019-09-26) +=================== + +- Support extracting and matching against wrapped errors with `errors.As` + and `errors.Is`. + + +v1.1.0 (2017-06-30) +=================== + +- Added an `Errors(error) []error` function to extract the underlying list of + errors for a multierr error. + + +v1.0.0 (2017-05-31) +=================== + +No changes since v0.2.0. This release is committing to making no breaking +changes to the current API in the 1.X series. 
+ + +v0.2.0 (2017-04-11) +=================== + +- Repeatedly appending to the same error is now faster due to fewer + allocations. + + +v0.1.0 (2017-31-03) +=================== + +- Initial release diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/go.uber.org/multierr/LICENSE.txt new file mode 100644 index 000000000..413e30f7c --- /dev/null +++ b/vendor/go.uber.org/multierr/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2017-2021 Uber Technologies, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile new file mode 100644 index 000000000..dcb6fe723 --- /dev/null +++ b/vendor/go.uber.org/multierr/Makefile @@ -0,0 +1,38 @@ +# Directory to put `go install`ed binaries in. +export GOBIN ?= $(shell pwd)/bin + +GO_FILES := $(shell \ + find . '(' -path '*/.*' -o -path './vendor' ')' -prune \ + -o -name '*.go' -print | cut -b3-) + +.PHONY: build +build: + go build ./... + +.PHONY: test +test: + go test -race ./... + +.PHONY: gofmt +gofmt: + $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX)) + @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true + @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false) + +.PHONY: golint +golint: + @cd tools && go install golang.org/x/lint/golint + @$(GOBIN)/golint ./... + +.PHONY: staticcheck +staticcheck: + @cd tools && go install honnef.co/go/tools/cmd/staticcheck + @$(GOBIN)/staticcheck ./... + +.PHONY: lint +lint: gofmt golint staticcheck + +.PHONY: cover +cover: + go test -race -coverprofile=cover.out -coverpkg=./... -v ./... + go tool cover -html=cover.out -o cover.html diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md new file mode 100644 index 000000000..5ab6ac40f --- /dev/null +++ b/vendor/go.uber.org/multierr/README.md @@ -0,0 +1,43 @@ +# multierr [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + +`multierr` allows combining one or more Go `error`s together. + +## Features + +- **Idiomatic**: + multierr follows best practices in Go, and keeps your code idiomatic. + - It keeps the underlying error type hidden, + allowing you to deal in `error` values exclusively. + - It provides APIs to safely append into an error from a `defer` statement. +- **Performant**: + multierr is optimized for performance: + - It avoids allocations where possible. + - It utilizes slice resizing semantics to optimize common cases + like appending into the same error object from a loop. 
+- **Interoperable**: + multierr interoperates with the Go standard library's error APIs seamlessly: + - The `errors.Is` and `errors.As` functions *just work*. +- **Lightweight**: + multierr comes with virtually no dependencies. + +## Installation + +```bash +go get -u go.uber.org/multierr@latest +``` + +## Status + +Stable: No breaking changes will be made before 2.0. + +------------------------------------------------------------------------------- + +Released under the [MIT License]. + +[MIT License]: LICENSE.txt +[doc-img]: https://pkg.go.dev/badge/go.uber.org/multierr +[doc]: https://pkg.go.dev/go.uber.org/multierr +[ci-img]: https://github.com/uber-go/multierr/actions/workflows/go.yml/badge.svg +[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg +[ci]: https://github.com/uber-go/multierr/actions/workflows/go.yml +[cov]: https://codecov.io/gh/uber-go/multierr diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go new file mode 100644 index 000000000..3a828b2df --- /dev/null +++ b/vendor/go.uber.org/multierr/error.go @@ -0,0 +1,646 @@ +// Copyright (c) 2017-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +// Package multierr allows combining one or more errors together. +// +// # Overview +// +// Errors can be combined with the use of the Combine function. +// +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// conn.Close(), +// ) +// +// If only two errors are being combined, the Append function may be used +// instead. +// +// err = multierr.Append(reader.Close(), writer.Close()) +// +// The underlying list of errors for a returned error object may be retrieved +// with the Errors function. +// +// errors := multierr.Errors(err) +// if len(errors) > 0 { +// fmt.Println("The following errors occurred:", errors) +// } +// +// # Appending from a loop +// +// You sometimes need to append into an error from a loop. +// +// var err error +// for _, item := range items { +// err = multierr.Append(err, process(item)) +// } +// +// Cases like this may require knowledge of whether an individual instance +// failed. This usually requires introduction of a new variable. +// +// var err error +// for _, item := range items { +// if perr := process(item); perr != nil { +// log.Warn("skipping item", item) +// err = multierr.Append(err, perr) +// } +// } +// +// multierr includes AppendInto to simplify cases like this. 
+// +// var err error +// for _, item := range items { +// if multierr.AppendInto(&err, process(item)) { +// log.Warn("skipping item", item) +// } +// } +// +// This will append the error into the err variable, and return true if that +// individual error was non-nil. +// +// See [AppendInto] for more information. +// +// # Deferred Functions +// +// Go makes it possible to modify the return value of a function in a defer +// block if the function was using named returns. This makes it possible to +// record resource cleanup failures from deferred blocks. +// +// func sendRequest(req Request) (err error) { +// conn, err := openConnection() +// if err != nil { +// return err +// } +// defer func() { +// err = multierr.Append(err, conn.Close()) +// }() +// // ... +// } +// +// multierr provides the Invoker type and AppendInvoke function to make cases +// like the above simpler and obviate the need for a closure. The following is +// roughly equivalent to the example above. +// +// func sendRequest(req Request) (err error) { +// conn, err := openConnection() +// if err != nil { +// return err +// } +// defer multierr.AppendInvoke(&err, multierr.Close(conn)) +// // ... +// } +// +// See [AppendInvoke] and [Invoker] for more information. +// +// NOTE: If you're modifying an error from inside a defer, you MUST use a named +// return value for that function. +// +// # Advanced Usage +// +// Errors returned by Combine and Append MAY implement the following +// interface. +// +// type errorGroup interface { +// // Returns a slice containing the underlying list of errors. +// // +// // This slice MUST NOT be modified by the caller. +// Errors() []error +// } +// +// Note that if you need access to list of errors behind a multierr error, you +// should prefer using the Errors function. That said, if you need cheap +// read-only access to the underlying errors slice, you can attempt to cast +// the error to this interface. You MUST handle the failure case gracefully +// because errors returned by Combine and Append are not guaranteed to +// implement this interface. +// +// var errors []error +// group, ok := err.(errorGroup) +// if ok { +// errors = group.Errors() +// } else { +// errors = []error{err} +// } +package multierr // import "go.uber.org/multierr" + +import ( + "bytes" + "errors" + "fmt" + "io" + "strings" + "sync" + "sync/atomic" +) + +var ( + // Separator for single-line error messages. + _singlelineSeparator = []byte("; ") + + // Prefix for multi-line messages + _multilinePrefix = []byte("the following errors occurred:") + + // Prefix for the first and following lines of an item in a list of + // multi-line error messages. + // + // For example, if a single item is: + // + // foo + // bar + // + // It will become, + // + // - foo + // bar + _multilineSeparator = []byte("\n - ") + _multilineIndent = []byte(" ") +) + +// _bufferPool is a pool of bytes.Buffers. +var _bufferPool = sync.Pool{ + New: func() interface{} { + return &bytes.Buffer{} + }, +} + +type errorGroup interface { + Errors() []error +} + +// Errors returns a slice containing zero or more errors that the supplied +// error is composed of. If the error is nil, a nil slice is returned. +// +// err := multierr.Append(r.Close(), w.Close()) +// errors := multierr.Errors(err) +// +// If the error is not composed of other errors, the returned slice contains +// just the error that was passed in. +// +// Callers of this function are free to modify the returned slice. 
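Editor's note: the package documentation above covers Combine, Append, and Errors; the following is a small, self-contained sketch of how they compose. The sentinel errors and the main function are hypothetical and are not part of the vendored package.

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

// errClose and errFlush are hypothetical sentinel errors used only for this sketch.
var (
	errClose = errors.New("close failed")
	errFlush = errors.New("flush failed")
)

func main() {
	// Combine skips nil values, so it can be fed the results of
	// independent operations directly.
	err := multierr.Combine(errClose, nil, errFlush)

	fmt.Println(err)
	// close failed; flush failed

	// Errors returns the flattened list of underlying errors.
	fmt.Println(len(multierr.Errors(err))) // 2

	// errors.Is traverses the combined error (via Unwrap() []error on
	// Go 1.20+, or the Is method on older toolchains).
	fmt.Println(errors.Is(err, errFlush)) // true
}
```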
+func Errors(err error) []error { + return extractErrors(err) +} + +// multiError is an error that holds one or more errors. +// +// An instance of this is guaranteed to be non-empty and flattened. That is, +// none of the errors inside multiError are other multiErrors. +// +// multiError formats to a semi-colon delimited list of error messages with +// %v and with a more readable multi-line format with %+v. +type multiError struct { + copyNeeded atomic.Bool + errors []error +} + +// Errors returns the list of underlying errors. +// +// This slice MUST NOT be modified. +func (merr *multiError) Errors() []error { + if merr == nil { + return nil + } + return merr.errors +} + +func (merr *multiError) Error() string { + if merr == nil { + return "" + } + + buff := _bufferPool.Get().(*bytes.Buffer) + buff.Reset() + + merr.writeSingleline(buff) + + result := buff.String() + _bufferPool.Put(buff) + return result +} + +// Every compares every error in the given err against the given target error +// using [errors.Is], and returns true only if every comparison returned true. +func Every(err error, target error) bool { + for _, e := range extractErrors(err) { + if !errors.Is(e, target) { + return false + } + } + return true +} + +func (merr *multiError) Format(f fmt.State, c rune) { + if c == 'v' && f.Flag('+') { + merr.writeMultiline(f) + } else { + merr.writeSingleline(f) + } +} + +func (merr *multiError) writeSingleline(w io.Writer) { + first := true + for _, item := range merr.errors { + if first { + first = false + } else { + w.Write(_singlelineSeparator) + } + io.WriteString(w, item.Error()) + } +} + +func (merr *multiError) writeMultiline(w io.Writer) { + w.Write(_multilinePrefix) + for _, item := range merr.errors { + w.Write(_multilineSeparator) + writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item)) + } +} + +// Writes s to the writer with the given prefix added before each line after +// the first. +func writePrefixLine(w io.Writer, prefix []byte, s string) { + first := true + for len(s) > 0 { + if first { + first = false + } else { + w.Write(prefix) + } + + idx := strings.IndexByte(s, '\n') + if idx < 0 { + idx = len(s) - 1 + } + + io.WriteString(w, s[:idx+1]) + s = s[idx+1:] + } +} + +type inspectResult struct { + // Number of top-level non-nil errors + Count int + + // Total number of errors including multiErrors + Capacity int + + // Index of the first non-nil error in the list. Value is meaningless if + // Count is zero. + FirstErrorIdx int + + // Whether the list contains at least one multiError + ContainsMultiError bool +} + +// Inspects the given slice of errors so that we can efficiently allocate +// space for it. +func inspect(errors []error) (res inspectResult) { + first := true + for i, err := range errors { + if err == nil { + continue + } + + res.Count++ + if first { + first = false + res.FirstErrorIdx = i + } + + if merr, ok := err.(*multiError); ok { + res.Capacity += len(merr.errors) + res.ContainsMultiError = true + } else { + res.Capacity++ + } + } + return +} + +// fromSlice converts the given list of errors into a single error. +func fromSlice(errors []error) error { + // Don't pay to inspect small slices. + switch len(errors) { + case 0: + return nil + case 1: + return errors[0] + } + + res := inspect(errors) + switch res.Count { + case 0: + return nil + case 1: + // only one non-nil entry + return errors[res.FirstErrorIdx] + case len(errors): + if !res.ContainsMultiError { + // Error list is flat. 
Make a copy of it + // Otherwise "errors" escapes to the heap + // unconditionally for all other cases. + // This lets us optimize for the "no errors" case. + out := append(([]error)(nil), errors...) + return &multiError{errors: out} + } + } + + nonNilErrs := make([]error, 0, res.Capacity) + for _, err := range errors[res.FirstErrorIdx:] { + if err == nil { + continue + } + + if nested, ok := err.(*multiError); ok { + nonNilErrs = append(nonNilErrs, nested.errors...) + } else { + nonNilErrs = append(nonNilErrs, err) + } + } + + return &multiError{errors: nonNilErrs} +} + +// Combine combines the passed errors into a single error. +// +// If zero arguments were passed or if all items are nil, a nil error is +// returned. +// +// Combine(nil, nil) // == nil +// +// If only a single error was passed, it is returned as-is. +// +// Combine(err) // == err +// +// Combine skips over nil arguments so this function may be used to combine +// together errors from operations that fail independently of each other. +// +// multierr.Combine( +// reader.Close(), +// writer.Close(), +// pipe.Close(), +// ) +// +// If any of the passed errors is a multierr error, it will be flattened along +// with the other errors. +// +// multierr.Combine(multierr.Combine(err1, err2), err3) +// // is the same as +// multierr.Combine(err1, err2, err3) +// +// The returned error formats into a readable multi-line error message if +// formatted with %+v. +// +// fmt.Sprintf("%+v", multierr.Combine(err1, err2)) +func Combine(errors ...error) error { + return fromSlice(errors) +} + +// Append appends the given errors together. Either value may be nil. +// +// This function is a specialization of Combine for the common case where +// there are only two errors. +// +// err = multierr.Append(reader.Close(), writer.Close()) +// +// The following pattern may also be used to record failure of deferred +// operations without losing information about the original error. +// +// func doSomething(..) (err error) { +// f := acquireResource() +// defer func() { +// err = multierr.Append(err, f.Close()) +// }() +// +// Note that the variable MUST be a named return to append an error to it from +// the defer statement. See also [AppendInvoke]. +func Append(left error, right error) error { + switch { + case left == nil: + return right + case right == nil: + return left + } + + if _, ok := right.(*multiError); !ok { + if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) { + // Common case where the error on the left is constantly being + // appended to. + errs := append(l.errors, right) + return &multiError{errors: errs} + } else if !ok { + // Both errors are single errors. + return &multiError{errors: []error{left, right}} + } + } + + // Either right or both, left and right, are multiErrors. Rely on usual + // expensive logic. + errors := [2]error{left, right} + return fromSlice(errors[0:]) +} + +// AppendInto appends an error into the destination of an error pointer and +// returns whether the error being appended was non-nil. +// +// var err error +// multierr.AppendInto(&err, r.Close()) +// multierr.AppendInto(&err, w.Close()) +// +// The above is equivalent to, +// +// err := multierr.Append(r.Close(), w.Close()) +// +// As AppendInto reports whether the provided error was non-nil, it may be +// used to build a multierr error in a loop more ergonomically. 
For example: +// +// var err error +// for line := range lines { +// var item Item +// if multierr.AppendInto(&err, parse(line, &item)) { +// continue +// } +// items = append(items, item) +// } +// +// Compare this with a version that relies solely on Append: +// +// var err error +// for line := range lines { +// var item Item +// if parseErr := parse(line, &item); parseErr != nil { +// err = multierr.Append(err, parseErr) +// continue +// } +// items = append(items, item) +// } +func AppendInto(into *error, err error) (errored bool) { + if into == nil { + // We panic if 'into' is nil. This is not documented above + // because suggesting that the pointer must be non-nil may + // confuse users into thinking that the error that it points + // to must be non-nil. + panic("misuse of multierr.AppendInto: into pointer must not be nil") + } + + if err == nil { + return false + } + *into = Append(*into, err) + return true +} + +// Invoker is an operation that may fail with an error. Use it with +// AppendInvoke to append the result of calling the function into an error. +// This allows you to conveniently defer capture of failing operations. +// +// See also, [Close] and [Invoke]. +type Invoker interface { + Invoke() error +} + +// Invoke wraps a function which may fail with an error to match the Invoker +// interface. Use it to supply functions matching this signature to +// AppendInvoke. +// +// For example, +// +// func processReader(r io.Reader) (err error) { +// scanner := bufio.NewScanner(r) +// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// for scanner.Scan() { +// // ... +// } +// // ... +// } +// +// In this example, the following line will construct the Invoker right away, +// but defer the invocation of scanner.Err() until the function returns. +// +// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err)) +// +// Note that the error you're appending to from the defer statement MUST be a +// named return. +type Invoke func() error + +// Invoke calls the supplied function and returns its result. +func (i Invoke) Invoke() error { return i() } + +// Close builds an Invoker that closes the provided io.Closer. Use it with +// AppendInvoke to close io.Closers and append their results into an error. +// +// For example, +// +// func processFile(path string) (err error) { +// f, err := os.Open(path) +// if err != nil { +// return err +// } +// defer multierr.AppendInvoke(&err, multierr.Close(f)) +// return processReader(f) +// } +// +// In this example, multierr.Close will construct the Invoker right away, but +// defer the invocation of f.Close until the function returns. +// +// defer multierr.AppendInvoke(&err, multierr.Close(f)) +// +// Note that the error you're appending to from the defer statement MUST be a +// named return. +func Close(closer io.Closer) Invoker { + return Invoke(closer.Close) +} + +// AppendInvoke appends the result of calling the given Invoker into the +// provided error pointer. Use it with named returns to safely defer +// invocation of fallible operations until a function returns, and capture the +// resulting errors. +// +// func doSomething(...) (err error) { +// // ... +// f, err := openFile(..) +// if err != nil { +// return err +// } +// +// // multierr will call f.Close() when this function returns and +// // if the operation fails, its append its error into the +// // returned error. 
+//	defer multierr.AppendInvoke(&err, multierr.Close(f))
+//
+//	scanner := bufio.NewScanner(f)
+//	// Similarly, this schedules scanner.Err to be called and
+//	// inspected when the function returns, appending its error
+//	// into the returned error.
+//	defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+//
+//	// ...
+// }
+//
+// NOTE: If used with a defer, the error variable MUST be a named return.
+//
+// Without defer, AppendInvoke behaves exactly like AppendInto.
+//
+//	err := // ...
+//	multierr.AppendInvoke(&err, multierr.Invoke(foo))
+//
+//	// ...is roughly equivalent to...
+//
+//	err := // ...
+//	multierr.AppendInto(&err, foo())
+//
+// The advantage of the indirection introduced by Invoker is to make it easy
+// to defer the invocation of a function. Without this indirection, the
+// invoked function will be evaluated at the time of the defer block rather
+// than when the function returns.
+//
+//	// BAD: This is likely not what the caller intended. This will evaluate
+//	// foo() right away and append its result into the error when the
+//	// function returns.
+//	defer multierr.AppendInto(&err, foo())
+//
+//	// GOOD: This will defer invocation of foo until the function returns.
+//	defer multierr.AppendInvoke(&err, multierr.Invoke(foo))
+//
+// multierr provides a few Invoker implementations out of the box for
+// convenience. See [Invoker] for more information.
+func AppendInvoke(into *error, invoker Invoker) {
+	AppendInto(into, invoker.Invoke())
+}
+
+// AppendFunc is a shorthand for [AppendInvoke].
+// It allows using a function or method value directly
+// without having to wrap it into an [Invoker] interface.
+//
+//	func doSomething(...) (err error) {
+//		w, err := startWorker(...)
+//		if err != nil {
+//			return err
+//		}
+//
+//		// multierr will call w.Stop() when this function returns and
+//		// if the operation fails, it appends its error into the
+//		// returned error.
+//		defer multierr.AppendFunc(&err, w.Stop)
+//	}
+func AppendFunc(into *error, fn func() error) {
+	AppendInvoke(into, Invoke(fn))
+}
diff --git a/vendor/go.uber.org/multierr/error_post_go120.go b/vendor/go.uber.org/multierr/error_post_go120.go
new file mode 100644
index 000000000..a173f9c25
--- /dev/null
+++ b/vendor/go.uber.org/multierr/error_post_go120.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
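Editor's note: the Invoker helpers documented above (Invoke, Close, AppendInvoke, AppendFunc) are easiest to see in a short deferred-cleanup sketch. The dumpFile helper and its use of os.DevNull are hypothetical; only the multierr calls mirror the vendored API.

```go
package main

import (
	"bufio"
	"fmt"
	"os"

	"go.uber.org/multierr"
)

// dumpFile is a hypothetical helper illustrating the deferred-cleanup pattern:
// Close and Invoke build Invokers, and AppendInvoke appends their results into
// the named return value err when dumpFile returns.
func dumpFile(path string) (err error) {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	// f.Close() runs when dumpFile returns; a failure is appended into err.
	defer multierr.AppendInvoke(&err, multierr.Close(f))

	scanner := bufio.NewScanner(f)
	// scanner.Err() is evaluated when dumpFile returns, not at defer time.
	defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))

	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
	return nil
}

func main() {
	// os.DevNull just keeps the example runnable without a fixture file.
	if err := dumpFile(os.DevNull); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```

With the AppendFunc shorthand added in this version, the scanner line could equivalently be written as `defer multierr.AppendFunc(&err, scanner.Err)`.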
+ +//go:build go1.20 +// +build go1.20 + +package multierr + +// Unwrap returns a list of errors wrapped by this multierr. +func (merr *multiError) Unwrap() []error { + return merr.Errors() +} + +type multipleErrors interface { + Unwrap() []error +} + +func extractErrors(err error) []error { + if err == nil { + return nil + } + + // check if the given err is an Unwrapable error that + // implements multipleErrors interface. + eg, ok := err.(multipleErrors) + if !ok { + return []error{err} + } + + return append(([]error)(nil), eg.Unwrap()...) +} diff --git a/vendor/go.uber.org/multierr/error_pre_go120.go b/vendor/go.uber.org/multierr/error_pre_go120.go new file mode 100644 index 000000000..93872a3fc --- /dev/null +++ b/vendor/go.uber.org/multierr/error_pre_go120.go @@ -0,0 +1,79 @@ +// Copyright (c) 2017-2023 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:build !go1.20 +// +build !go1.20 + +package multierr + +import "errors" + +// Versions of Go before 1.20 did not support the Unwrap() []error method. +// This provides a similar behavior by implementing the Is(..) and As(..) +// methods. +// See the errors.Join proposal for details: +// https://github.com/golang/go/issues/53435 + +// As attempts to find the first error in the error list that matches the type +// of the value that target points to. +// +// This function allows errors.As to traverse the values stored on the +// multierr error. +func (merr *multiError) As(target interface{}) bool { + for _, err := range merr.Errors() { + if errors.As(err, target) { + return true + } + } + return false +} + +// Is attempts to match the provided error against errors in the error list. +// +// This function allows errors.Is to traverse the values stored on the +// multierr error. +func (merr *multiError) Is(target error) bool { + for _, err := range merr.Errors() { + if errors.Is(err, target) { + return true + } + } + return false +} + +func extractErrors(err error) []error { + if err == nil { + return nil + } + + // Note that we're casting to multiError, not errorGroup. Our contract is + // that returned errors MAY implement errorGroup. Errors, however, only + // has special behavior for multierr-specific error objects. + // + // This behavior can be expanded in the future but I think it's prudent to + // start with as little as possible in terms of contract and possibility + // of misuse. 
+ eg, ok := err.(*multiError) + if !ok { + return []error{err} + } + + return append(([]error)(nil), eg.Errors()...) +} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go index 94c71ac1a..5dfacbb98 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.11 && gc && !purego -// +build go1.11,gc,!purego +//go:build gc && !purego +// +build gc,!purego package chacha20 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s index 63cae9e6f..f1f66230d 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s +++ b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.11 && gc && !purego -// +build go1.11,gc,!purego +//go:build gc && !purego +// +build gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go index 025b49897..02ff3d05e 100644 --- a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go +++ b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (!arm64 && !s390x && !ppc64le) || (arm64 && !go1.11) || !gc || purego -// +build !arm64,!s390x,!ppc64le arm64,!go1.11 !gc purego +//go:build (!arm64 && !s390x && !ppc64le) || !gc || purego +// +build !arm64,!s390x,!ppc64le !gc purego package chacha20 diff --git a/vendor/golang.org/x/crypto/cryptobyte/builder.go b/vendor/golang.org/x/crypto/cryptobyte/builder.go index c05ac7d16..cf254f5f1 100644 --- a/vendor/golang.org/x/crypto/cryptobyte/builder.go +++ b/vendor/golang.org/x/crypto/cryptobyte/builder.go @@ -95,6 +95,11 @@ func (b *Builder) AddUint32(v uint32) { b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) } +// AddUint48 appends a big-endian, 48-bit value to the byte string. +func (b *Builder) AddUint48(v uint64) { + b.add(byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + // AddUint64 appends a big-endian, 64-bit value to the byte string. func (b *Builder) AddUint64(v uint64) { b.add(byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go index 0531a3d6f..10692a8a3 100644 --- a/vendor/golang.org/x/crypto/cryptobyte/string.go +++ b/vendor/golang.org/x/crypto/cryptobyte/string.go @@ -81,6 +81,17 @@ func (s *String) ReadUint32(out *uint32) bool { return true } +// ReadUint48 decodes a big-endian, 48-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint48(out *uint64) bool { + v := s.read(6) + if v == nil { + return false + } + *out = uint64(v[0])<<40 | uint64(v[1])<<32 | uint64(v[2])<<24 | uint64(v[3])<<16 | uint64(v[4])<<8 | uint64(v[5]) + return true +} + // ReadUint64 decodes a big-endian, 64-bit value into out and advances over it. // It reports whether the read was successful. 
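Editor's note: the cryptobyte hunks above add matching 48-bit helpers to Builder and String. A minimal round-trip sketch (the value below is arbitrary) might look like this:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	// Build a byte string containing a single big-endian 48-bit value.
	var b cryptobyte.Builder
	b.AddUint48(0x0102_0304_0506)

	raw, err := b.Bytes()
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", raw) // 01 02 03 04 05 06

	// Read it back with the matching String helper.
	s := cryptobyte.String(raw)
	var v uint64
	if !s.ReadUint48(&v) {
		panic("short read")
	}
	fmt.Printf("%#x\n", v) // 0x10203040506
}
```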
func (s *String) ReadUint64(out *uint64) bool { diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index cf66309c4..000000000 --- a/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context // import "golang.org/x/net/context" - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index 2cb9c408f..000000000 --- a/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 -// +build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. 
-var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, f -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, f -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index 64d31ecc3..000000000 --- a/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 -// +build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. 
-type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 7b6b68511..000000000 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 -// +build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. 
-func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. 
- - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index 1f9715341..000000000 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.9 -// +build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. 
- // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile deleted file mode 100644 index 851224595..000000000 --- a/vendor/golang.org/x/net/http2/Dockerfile +++ /dev/null @@ -1,51 +0,0 @@ -# -# This Dockerfile builds a recent curl with HTTP/2 client support, using -# a recent nghttp2 build. -# -# See the Makefile for how to tag it. If Docker and that image is found, the -# Go tests use this curl binary for integration tests. 
-# - -FROM ubuntu:trusty - -RUN apt-get update && \ - apt-get upgrade -y && \ - apt-get install -y git-core build-essential wget - -RUN apt-get install -y --no-install-recommends \ - autotools-dev libtool pkg-config zlib1g-dev \ - libcunit1-dev libssl-dev libxml2-dev libevent-dev \ - automake autoconf - -# The list of packages nghttp2 recommends for h2load: -RUN apt-get install -y --no-install-recommends make binutils \ - autoconf automake autotools-dev \ - libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \ - libev-dev libevent-dev libjansson-dev libjemalloc-dev \ - cython python3.4-dev python-setuptools - -# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached: -ENV NGHTTP2_VER 895da9a -RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git - -WORKDIR /root/nghttp2 -RUN git reset --hard $NGHTTP2_VER -RUN autoreconf -i -RUN automake -RUN autoconf -RUN ./configure -RUN make -RUN make install - -WORKDIR /root -RUN wget https://curl.se/download/curl-7.45.0.tar.gz -RUN tar -zxvf curl-7.45.0.tar.gz -WORKDIR /root/curl-7.45.0 -RUN ./configure --with-ssl --with-nghttp2=/usr/local -RUN make -RUN make install -RUN ldconfig - -CMD ["-h"] -ENTRYPOINT ["/usr/local/bin/curl"] - diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile deleted file mode 100644 index 55fd826f7..000000000 --- a/vendor/golang.org/x/net/http2/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -curlimage: - docker build -t gohttp2/curl . - diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 033b6e6db..02c88b6b3 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -581,9 +581,11 @@ type serverConn struct { advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client curClientStreams uint32 // number of open streams initiated by the client curPushedStreams uint32 // number of open streams initiated by server push + curHandlers uint32 // number of running handler goroutines maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes streams map[uint32]*stream + unstartedHandlers []unstartedHandler initialStreamSendWindowSize int32 maxFrameSize int32 peerMaxHeaderListSize uint32 // zero means unknown (default) @@ -981,6 +983,8 @@ func (sc *serverConn) serve() { return case gracefulShutdownMsg: sc.startGracefulShutdownInternal() + case handlerDoneMsg: + sc.handlerDone() default: panic("unknown timer") } @@ -1012,14 +1016,6 @@ func (sc *serverConn) serve() { } } -func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) { - select { - case <-sc.doneServing: - case <-sharedCh: - close(privateCh) - } -} - type serverMessage int // Message values sent to serveMsgCh. @@ -1028,6 +1024,7 @@ var ( idleTimerMsg = new(serverMessage) shutdownTimerMsg = new(serverMessage) gracefulShutdownMsg = new(serverMessage) + handlerDoneMsg = new(serverMessage) ) func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } @@ -1900,9 +1897,11 @@ func (st *stream) copyTrailersToHandlerRequest() { // onReadTimeout is run on its own goroutine (from time.AfterFunc) // when the stream's ReadTimeout has fired. func (st *stream) onReadTimeout() { - // Wrap the ErrDeadlineExceeded to avoid callers depending on us - // returning the bare error. 
- st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded)) + if st.body != nil { + // Wrap the ErrDeadlineExceeded to avoid callers depending on us + // returning the bare error. + st.body.CloseWithError(fmt.Errorf("%w", os.ErrDeadlineExceeded)) + } } // onWriteTimeout is run on its own goroutine (from time.AfterFunc) @@ -2020,13 +2019,10 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // (in Go 1.8), though. That's a more sane option anyway. if sc.hs.ReadTimeout != 0 { sc.conn.SetReadDeadline(time.Time{}) - if st.body != nil { - st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) - } + st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } - go sc.runHandler(rw, req, handler) - return nil + return sc.scheduleHandler(id, rw, req, handler) } func (sc *serverConn) upgradeRequest(req *http.Request) { @@ -2046,6 +2042,10 @@ func (sc *serverConn) upgradeRequest(req *http.Request) { sc.conn.SetReadDeadline(time.Time{}) } + // This is the first request on the connection, + // so start the handler directly rather than going + // through scheduleHandler. + sc.curHandlers++ go sc.runHandler(rw, req, sc.handler.ServeHTTP) } @@ -2286,8 +2286,62 @@ func (sc *serverConn) newResponseWriter(st *stream, req *http.Request) *response return &responseWriter{rws: rws} } +type unstartedHandler struct { + streamID uint32 + rw *responseWriter + req *http.Request + handler func(http.ResponseWriter, *http.Request) +} + +// scheduleHandler starts a handler goroutine, +// or schedules one to start as soon as an existing handler finishes. +func (sc *serverConn) scheduleHandler(streamID uint32, rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) error { + sc.serveG.check() + maxHandlers := sc.advMaxStreams + if sc.curHandlers < maxHandlers { + sc.curHandlers++ + go sc.runHandler(rw, req, handler) + return nil + } + if len(sc.unstartedHandlers) > int(4*sc.advMaxStreams) { + return sc.countError("too_many_early_resets", ConnectionError(ErrCodeEnhanceYourCalm)) + } + sc.unstartedHandlers = append(sc.unstartedHandlers, unstartedHandler{ + streamID: streamID, + rw: rw, + req: req, + handler: handler, + }) + return nil +} + +func (sc *serverConn) handlerDone() { + sc.serveG.check() + sc.curHandlers-- + i := 0 + maxHandlers := sc.advMaxStreams + for ; i < len(sc.unstartedHandlers); i++ { + u := sc.unstartedHandlers[i] + if sc.streams[u.streamID] == nil { + // This stream was reset before its goroutine had a chance to start. + continue + } + if sc.curHandlers >= maxHandlers { + break + } + sc.curHandlers++ + go sc.runHandler(u.rw, u.req, u.handler) + sc.unstartedHandlers[i] = unstartedHandler{} // don't retain references + } + sc.unstartedHandlers = sc.unstartedHandlers[i:] + if len(sc.unstartedHandlers) == 0 { + sc.unstartedHandlers = nil + } +} + // Run on its own goroutine. func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { + defer sc.sendServeMsg(handlerDoneMsg) didPanic := true defer func() { rw.rws.stream.cancelCtx() diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index b0d482f9f..4515b22c4 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -291,8 +291,7 @@ func (t *Transport) initConnPool() { // HTTP/2 server. 
type ClientConn struct { t *Transport - tconn net.Conn // usually *tls.Conn, except specialized impls - tconnClosed bool + tconn net.Conn // usually *tls.Conn, except specialized impls tlsState *tls.ConnectionState // nil only for specialized impls reused uint32 // whether conn is being reused; atomic singleUse bool // whether being used for a single http.Request diff --git a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go index 7a0b9ed10..2459d069f 100644 --- a/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go +++ b/vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go @@ -47,6 +47,10 @@ type Config struct { // client ID & client secret sent. The zero value means to // auto-detect. AuthStyle oauth2.AuthStyle + + // authStyleCache caches which auth style to use when Endpoint.AuthStyle is + // the zero value (AuthStyleAutoDetect). + authStyleCache internal.LazyAuthStyleCache } // Token uses client credentials to retrieve a token. @@ -103,7 +107,7 @@ func (c *tokenSource) Token() (*oauth2.Token, error) { v[k] = p } - tk, err := internal.RetrieveToken(c.ctx, c.conf.ClientID, c.conf.ClientSecret, c.conf.TokenURL, v, internal.AuthStyle(c.conf.AuthStyle)) + tk, err := internal.RetrieveToken(c.ctx, c.conf.ClientID, c.conf.ClientSecret, c.conf.TokenURL, v, internal.AuthStyle(c.conf.AuthStyle), c.conf.authStyleCache.Get()) if err != nil { if rErr, ok := err.(*internal.RetrieveError); ok { return nil, (*oauth2.RetrieveError)(rErr) diff --git a/vendor/golang.org/x/oauth2/deviceauth.go b/vendor/golang.org/x/oauth2/deviceauth.go new file mode 100644 index 000000000..e99c92f39 --- /dev/null +++ b/vendor/golang.org/x/oauth2/deviceauth.go @@ -0,0 +1,198 @@ +package oauth2 + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "time" + + "golang.org/x/oauth2/internal" +) + +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 +const ( + errAuthorizationPending = "authorization_pending" + errSlowDown = "slow_down" + errAccessDenied = "access_denied" + errExpiredToken = "expired_token" +) + +// DeviceAuthResponse describes a successful RFC 8628 Device Authorization Response +// https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 +type DeviceAuthResponse struct { + // DeviceCode + DeviceCode string `json:"device_code"` + // UserCode is the code the user should enter at the verification uri + UserCode string `json:"user_code"` + // VerificationURI is where user should enter the user code + VerificationURI string `json:"verification_uri"` + // VerificationURIComplete (if populated) includes the user code in the verification URI. This is typically shown to the user in non-textual form, such as a QR code. 
+ VerificationURIComplete string `json:"verification_uri_complete,omitempty"` + // Expiry is when the device code and user code expire + Expiry time.Time `json:"expires_in,omitempty"` + // Interval is the duration in seconds that Poll should wait between requests + Interval int64 `json:"interval,omitempty"` +} + +func (d DeviceAuthResponse) MarshalJSON() ([]byte, error) { + type Alias DeviceAuthResponse + var expiresIn int64 + if !d.Expiry.IsZero() { + expiresIn = int64(time.Until(d.Expiry).Seconds()) + } + return json.Marshal(&struct { + ExpiresIn int64 `json:"expires_in,omitempty"` + *Alias + }{ + ExpiresIn: expiresIn, + Alias: (*Alias)(&d), + }) + +} + +func (c *DeviceAuthResponse) UnmarshalJSON(data []byte) error { + type Alias DeviceAuthResponse + aux := &struct { + ExpiresIn int64 `json:"expires_in"` + // workaround misspelling of verification_uri + VerificationURL string `json:"verification_url"` + *Alias + }{ + Alias: (*Alias)(c), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + if aux.ExpiresIn != 0 { + c.Expiry = time.Now().UTC().Add(time.Second * time.Duration(aux.ExpiresIn)) + } + if c.VerificationURI == "" { + c.VerificationURI = aux.VerificationURL + } + return nil +} + +// DeviceAuth returns a device auth struct which contains a device code +// and authorization information provided for users to enter on another device. +func (c *Config) DeviceAuth(ctx context.Context, opts ...AuthCodeOption) (*DeviceAuthResponse, error) { + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.1 + v := url.Values{ + "client_id": {c.ClientID}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + return retrieveDeviceAuth(ctx, c, v) +} + +func retrieveDeviceAuth(ctx context.Context, c *Config, v url.Values) (*DeviceAuthResponse, error) { + if c.Endpoint.DeviceAuthURL == "" { + return nil, errors.New("endpoint missing DeviceAuthURL") + } + + req, err := http.NewRequest("POST", c.Endpoint.DeviceAuthURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + req.Header.Set("Accept", "application/json") + + t := time.Now() + r, err := internal.ContextClient(ctx).Do(req) + if err != nil { + return nil, err + } + + body, err := io.ReadAll(io.LimitReader(r.Body, 1<<20)) + if err != nil { + return nil, fmt.Errorf("oauth2: cannot auth device: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, &RetrieveError{ + Response: r, + Body: body, + } + } + + da := &DeviceAuthResponse{} + err = json.Unmarshal(body, &da) + if err != nil { + return nil, fmt.Errorf("unmarshal %s", err) + } + + if !da.Expiry.IsZero() { + // Make a small adjustment to account for time taken by the request + da.Expiry = da.Expiry.Add(-time.Since(t)) + } + + return da, nil +} + +// DeviceAccessToken polls the server to exchange a device code for a token. 
+func (c *Config) DeviceAccessToken(ctx context.Context, da *DeviceAuthResponse, opts ...AuthCodeOption) (*Token, error) { + if !da.Expiry.IsZero() { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, da.Expiry) + defer cancel() + } + + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.4 + v := url.Values{ + "client_id": {c.ClientID}, + "grant_type": {"urn:ietf:params:oauth:grant-type:device_code"}, + "device_code": {da.DeviceCode}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + for _, opt := range opts { + opt.setValue(v) + } + + // "If no value is provided, clients MUST use 5 as the default." + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.2 + interval := da.Interval + if interval == 0 { + interval = 5 + } + + ticker := time.NewTicker(time.Duration(interval) * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-ticker.C: + tok, err := retrieveToken(ctx, c, v) + if err == nil { + return tok, nil + } + + e, ok := err.(*RetrieveError) + if !ok { + return nil, err + } + switch e.ErrorCode { + case errSlowDown: + // https://datatracker.ietf.org/doc/html/rfc8628#section-3.5 + // "the interval MUST be increased by 5 seconds for this and all subsequent requests" + interval += 5 + ticker.Reset(time.Duration(interval) * time.Second) + case errAuthorizationPending: + // Do nothing. + case errAccessDenied, errExpiredToken: + fallthrough + default: + return tok, err + } + } + } +} diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index 2cf71f0f9..12b12a30c 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -19,7 +19,10 @@ import ( "golang.org/x/oauth2/authhandler" ) -const adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" +const ( + adcSetupURL = "https://cloud.google.com/docs/authentication/external/set-up-adc" + universeDomainDefault = "googleapis.com" +) // Credentials holds Google credentials, including "Application Default Credentials". // For more details, see: @@ -37,6 +40,18 @@ type Credentials struct { // environment and not with a credentials file, e.g. when code is // running on Google Cloud Platform. JSON []byte + + // universeDomain is the default service domain for a given Cloud universe. + universeDomain string +} + +// UniverseDomain returns the default service domain for a given Cloud universe. +// The default value is "googleapis.com". +func (c *Credentials) UniverseDomain() string { + if c.universeDomain == "" { + return universeDomainDefault + } + return c.universeDomain } // DefaultCredentials is the old name of Credentials. @@ -200,15 +215,23 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params if err := json.Unmarshal(jsonData, &f); err != nil { return nil, err } + + universeDomain := f.UniverseDomain + // Authorized user credentials are only supported in the googleapis.com universe. 
+ if f.Type == userCredentialsKey { + universeDomain = universeDomainDefault + } + ts, err := f.tokenSource(ctx, params) if err != nil { return nil, err } ts = newErrWrappingTokenSource(ts) return &Credentials{ - ProjectID: f.ProjectID, - TokenSource: ts, - JSON: jsonData, + ProjectID: f.ProjectID, + TokenSource: ts, + JSON: jsonData, + universeDomain: universeDomain, }, nil } diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index cc1223889..c66c53527 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -16,14 +16,16 @@ import ( "cloud.google.com/go/compute/metadata" "golang.org/x/oauth2" "golang.org/x/oauth2/google/internal/externalaccount" + "golang.org/x/oauth2/google/internal/externalaccountauthorizeduser" "golang.org/x/oauth2/jwt" ) // Endpoint is Google's OAuth 2.0 default endpoint. var Endpoint = oauth2.Endpoint{ - AuthURL: "https://accounts.google.com/o/oauth2/auth", - TokenURL: "https://oauth2.googleapis.com/token", - AuthStyle: oauth2.AuthStyleInParams, + AuthURL: "https://accounts.google.com/o/oauth2/auth", + TokenURL: "https://oauth2.googleapis.com/token", + DeviceAuthURL: "https://oauth2.googleapis.com/device/code", + AuthStyle: oauth2.AuthStyleInParams, } // MTLSTokenURL is Google's OAuth 2.0 default mTLS endpoint. @@ -95,10 +97,11 @@ func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) { // JSON key file types. const ( - serviceAccountKey = "service_account" - userCredentialsKey = "authorized_user" - externalAccountKey = "external_account" - impersonatedServiceAccount = "impersonated_service_account" + serviceAccountKey = "service_account" + userCredentialsKey = "authorized_user" + externalAccountKey = "external_account" + externalAccountAuthorizedUserKey = "external_account_authorized_user" + impersonatedServiceAccount = "impersonated_service_account" ) // credentialsFile is the unmarshalled representation of a credentials file. @@ -106,12 +109,13 @@ type credentialsFile struct { Type string `json:"type"` // Service Account fields - ClientEmail string `json:"client_email"` - PrivateKeyID string `json:"private_key_id"` - PrivateKey string `json:"private_key"` - AuthURL string `json:"auth_uri"` - TokenURL string `json:"token_uri"` - ProjectID string `json:"project_id"` + ClientEmail string `json:"client_email"` + PrivateKeyID string `json:"private_key_id"` + PrivateKey string `json:"private_key"` + AuthURL string `json:"auth_uri"` + TokenURL string `json:"token_uri"` + ProjectID string `json:"project_id"` + UniverseDomain string `json:"universe_domain"` // User Credential fields // (These typically come from gcloud auth.) 
@@ -131,6 +135,9 @@ type credentialsFile struct { QuotaProjectID string `json:"quota_project_id"` WorkforcePoolUserProject string `json:"workforce_pool_user_project"` + // External Account Authorized User fields + RevokeURL string `json:"revoke_url"` + // Service account impersonation SourceCredentials *credentialsFile `json:"source_credentials"` } @@ -199,6 +206,19 @@ func (f *credentialsFile) tokenSource(ctx context.Context, params CredentialsPar WorkforcePoolUserProject: f.WorkforcePoolUserProject, } return cfg.TokenSource(ctx) + case externalAccountAuthorizedUserKey: + cfg := &externalaccountauthorizeduser.Config{ + Audience: f.Audience, + RefreshToken: f.RefreshToken, + TokenURL: f.TokenURLExternal, + TokenInfoURL: f.TokenInfoURL, + ClientID: f.ClientID, + ClientSecret: f.ClientSecret, + RevokeURL: f.RevokeURL, + QuotaProjectID: f.QuotaProjectID, + Scopes: params.Scopes, + } + return cfg.TokenSource(ctx) case impersonatedServiceAccount: if f.ServiceAccountImpersonationURL == "" || f.SourceCredentials == nil { return nil, errors.New("missing 'source_credentials' field or 'service_account_impersonation_url' in credentials") diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go index 2bf3202b2..bd4efd19b 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go @@ -274,49 +274,6 @@ type awsRequest struct { Headers []awsRequestHeader `json:"headers"` } -func (cs awsCredentialSource) validateMetadataServers() error { - if err := cs.validateMetadataServer(cs.RegionURL, "region_url"); err != nil { - return err - } - if err := cs.validateMetadataServer(cs.CredVerificationURL, "url"); err != nil { - return err - } - return cs.validateMetadataServer(cs.IMDSv2SessionTokenURL, "imdsv2_session_token_url") -} - -var validHostnames []string = []string{"169.254.169.254", "fd00:ec2::254"} - -func (cs awsCredentialSource) isValidMetadataServer(metadataUrl string) bool { - if metadataUrl == "" { - // Zero value means use default, which is valid. 
- return true - } - - u, err := url.Parse(metadataUrl) - if err != nil { - // Unparseable URL means invalid - return false - } - - for _, validHostname := range validHostnames { - if u.Hostname() == validHostname { - // If it's one of the valid hostnames, everything is good - return true - } - } - - // hostname not found in our allowlist, so not valid - return false -} - -func (cs awsCredentialSource) validateMetadataServer(metadataUrl, urlName string) error { - if !cs.isValidMetadataServer(metadataUrl) { - return fmt.Errorf("oauth2/google: invalid hostname %s for %s", metadataUrl, urlName) - } - - return nil -} - func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, error) { if cs.client == nil { cs.client = oauth2.NewClient(cs.ctx, nil) @@ -339,6 +296,10 @@ func shouldUseMetadataServer() bool { return !canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment() } +func (cs awsCredentialSource) credentialSourceType() string { + return "aws" +} + func (cs awsCredentialSource) subjectToken() (string, error) { if cs.requestSigner == nil { headers := make(map[string]string) diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go index dcd252a61..33288d367 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go @@ -8,13 +8,12 @@ import ( "context" "fmt" "net/http" - "net/url" "regexp" "strconv" - "strings" "time" "golang.org/x/oauth2" + "golang.org/x/oauth2/google/internal/stsexchange" ) // now aliases time.Now for testing @@ -63,31 +62,10 @@ type Config struct { WorkforcePoolUserProject string } -// Each element consists of a list of patterns. validateURLs checks for matches -// that include all elements in a given list, in that order. 
- var ( validWorkforceAudiencePattern *regexp.Regexp = regexp.MustCompile(`//iam\.googleapis\.com/locations/[^/]+/workforcePools/`) ) -func validateURL(input string, patterns []*regexp.Regexp, scheme string) bool { - parsed, err := url.Parse(input) - if err != nil { - return false - } - if !strings.EqualFold(parsed.Scheme, scheme) { - return false - } - toTest := parsed.Host - - for _, pattern := range patterns { - if pattern.MatchString(toTest) { - return true - } - } - return false -} - func validateWorkforceAudience(input string) bool { return validWorkforceAudiencePattern.MatchString(input) } @@ -185,10 +163,6 @@ func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { awsCredSource.IMDSv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL } - if err := awsCredSource.validateMetadataServers(); err != nil { - return nil, err - } - return awsCredSource, nil } } else if c.CredentialSource.File != "" { @@ -202,6 +176,7 @@ func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { } type baseCredentialSource interface { + credentialSourceType() string subjectToken() (string, error) } @@ -211,6 +186,15 @@ type tokenSource struct { conf *Config } +func getMetricsHeaderValue(conf *Config, credSource baseCredentialSource) string { + return fmt.Sprintf("gl-go/%s auth/%s google-byoid-sdk source/%s sa-impersonation/%t config-lifetime/%t", + goVersion(), + "unknown", + credSource.credentialSourceType(), + conf.ServiceAccountImpersonationURL != "", + conf.ServiceAccountImpersonationLifetimeSeconds != 0) +} + // Token allows tokenSource to conform to the oauth2.TokenSource interface. func (ts tokenSource) Token() (*oauth2.Token, error) { conf := ts.conf @@ -224,7 +208,7 @@ func (ts tokenSource) Token() (*oauth2.Token, error) { if err != nil { return nil, err } - stsRequest := stsTokenExchangeRequest{ + stsRequest := stsexchange.TokenExchangeRequest{ GrantType: "urn:ietf:params:oauth:grant-type:token-exchange", Audience: conf.Audience, Scope: conf.Scopes, @@ -234,7 +218,8 @@ func (ts tokenSource) Token() (*oauth2.Token, error) { } header := make(http.Header) header.Add("Content-Type", "application/x-www-form-urlencoded") - clientAuth := clientAuthentication{ + header.Add("x-goog-api-client", getMetricsHeaderValue(conf, credSource)) + clientAuth := stsexchange.ClientAuthentication{ AuthStyle: oauth2.AuthStyleInHeader, ClientID: conf.ClientID, ClientSecret: conf.ClientSecret, @@ -247,7 +232,7 @@ func (ts tokenSource) Token() (*oauth2.Token, error) { "userProject": conf.WorkforcePoolUserProject, } } - stsResp, err := exchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, options) + stsResp, err := stsexchange.ExchangeToken(ts.ctx, conf.TokenURL, &stsRequest, clientAuth, header, options) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go index 579bcce5f..6497dc022 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/executablecredsource.go @@ -233,6 +233,10 @@ func (cs executableCredentialSource) parseSubjectTokenFromSource(response []byte return "", tokenTypeError(source) } +func (cs executableCredentialSource) credentialSourceType() string { + return "executable" +} + func (cs executableCredentialSource) subjectToken() (string, error) { if token, err := 
cs.getTokenFromOutputFile(); token != "" || err != nil { return token, err diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go index e953ddb47..f35f73c5c 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/filecredsource.go @@ -19,6 +19,10 @@ type fileCredentialSource struct { Format format } +func (cs fileCredentialSource) credentialSourceType() string { + return "file" +} + func (cs fileCredentialSource) subjectToken() (string, error) { tokenFile, err := os.Open(cs.File) if err != nil { diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go new file mode 100644 index 000000000..1d5aad2e2 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/header.go @@ -0,0 +1,64 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccount + +import ( + "runtime" + "strings" + "unicode" +) + +var ( + // version is a package internal global variable for testing purposes. + version = runtime.Version +) + +// versionUnknown is only used when the runtime version cannot be determined. +const versionUnknown = "UNKNOWN" + +// goVersion returns a Go runtime version derived from the runtime environment +// that is modified to be suitable for reporting in a header, meaning it has no +// whitespace. If it is unable to determine the Go runtime version, it returns +// versionUnknown. +func goVersion() string { + const develPrefix = "devel +" + + s := version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } else if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + // Some release candidates already have a dash in them. 
+ if !strings.HasPrefix(prerelease, "-") { + prerelease = "-" + prerelease + } + s += prerelease + } + return s + } + return "UNKNOWN" +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go index 16dca6541..606bb4e80 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/urlcredsource.go @@ -23,6 +23,10 @@ type urlCredentialSource struct { ctx context.Context } +func (cs urlCredentialSource) credentialSourceType() string { + return "url" +} + func (cs urlCredentialSource) subjectToken() (string, error) { client := oauth2.NewClient(cs.ctx, nil) req, err := http.NewRequest("GET", cs.URL, nil) diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go b/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go new file mode 100644 index 000000000..cb5820707 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccountauthorizeduser/externalaccountauthorizeduser.go @@ -0,0 +1,114 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package externalaccountauthorizeduser + +import ( + "context" + "errors" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google/internal/stsexchange" +) + +// now aliases time.Now for testing. +var now = func() time.Time { + return time.Now().UTC() +} + +var tokenValid = func(token oauth2.Token) bool { + return token.Valid() +} + +type Config struct { + // Audience is the Secure Token Service (STS) audience which contains the resource name for the workforce pool and + // the provider identifier in that pool. + Audience string + // RefreshToken is the optional OAuth 2.0 refresh token. If specified, credentials can be refreshed. + RefreshToken string + // TokenURL is the optional STS token exchange endpoint for refresh. Must be specified for refresh, can be left as + // None if the token can not be refreshed. + TokenURL string + // TokenInfoURL is the optional STS endpoint URL for token introspection. + TokenInfoURL string + // ClientID is only required in conjunction with ClientSecret, as described above. + ClientID string + // ClientSecret is currently only required if token_info endpoint also needs to be called with the generated GCP + // access token. When provided, STS will be called with additional basic authentication using client_id as username + // and client_secret as password. + ClientSecret string + // Token is the OAuth2.0 access token. Can be nil if refresh information is provided. + Token string + // Expiry is the optional expiration datetime of the OAuth 2.0 access token. + Expiry time.Time + // RevokeURL is the optional STS endpoint URL for revoking tokens. + RevokeURL string + // QuotaProjectID is the optional project ID used for quota and billing. This project may be different from the + // project used to create the credentials. 
+ QuotaProjectID string + Scopes []string +} + +func (c *Config) canRefresh() bool { + return c.ClientID != "" && c.ClientSecret != "" && c.RefreshToken != "" && c.TokenURL != "" +} + +func (c *Config) TokenSource(ctx context.Context) (oauth2.TokenSource, error) { + var token oauth2.Token + if c.Token != "" && !c.Expiry.IsZero() { + token = oauth2.Token{ + AccessToken: c.Token, + Expiry: c.Expiry, + TokenType: "Bearer", + } + } + if !tokenValid(token) && !c.canRefresh() { + return nil, errors.New("oauth2/google: Token should be created with fields to make it valid (`token` and `expiry`), or fields to allow it to refresh (`refresh_token`, `token_url`, `client_id`, `client_secret`).") + } + + ts := tokenSource{ + ctx: ctx, + conf: c, + } + + return oauth2.ReuseTokenSource(&token, ts), nil +} + +type tokenSource struct { + ctx context.Context + conf *Config +} + +func (ts tokenSource) Token() (*oauth2.Token, error) { + conf := ts.conf + if !conf.canRefresh() { + return nil, errors.New("oauth2/google: The credentials do not contain the necessary fields need to refresh the access token. You must specify refresh_token, token_url, client_id, and client_secret.") + } + + clientAuth := stsexchange.ClientAuthentication{ + AuthStyle: oauth2.AuthStyleInHeader, + ClientID: conf.ClientID, + ClientSecret: conf.ClientSecret, + } + + stsResponse, err := stsexchange.RefreshAccessToken(ts.ctx, conf.TokenURL, conf.RefreshToken, clientAuth, nil) + if err != nil { + return nil, err + } + if stsResponse.ExpiresIn < 0 { + return nil, errors.New("oauth2/google: got invalid expiry from security token service") + } + + if stsResponse.RefreshToken != "" { + conf.RefreshToken = stsResponse.RefreshToken + } + + token := &oauth2.Token{ + AccessToken: stsResponse.AccessToken, + Expiry: now().Add(time.Duration(stsResponse.ExpiresIn) * time.Second), + TokenType: "Bearer", + } + return token, nil +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go b/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go similarity index 88% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go rename to vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go index 99987ce29..ebd520eac 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/clientauth.go +++ b/vendor/golang.org/x/oauth2/google/internal/stsexchange/clientauth.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package externalaccount +package stsexchange import ( "encoding/base64" @@ -12,8 +12,8 @@ import ( "golang.org/x/oauth2" ) -// clientAuthentication represents an OAuth client ID and secret and the mechanism for passing these credentials as stated in rfc6749#2.3.1. -type clientAuthentication struct { +// ClientAuthentication represents an OAuth client ID and secret and the mechanism for passing these credentials as stated in rfc6749#2.3.1. +type ClientAuthentication struct { // AuthStyle can be either basic or request-body AuthStyle oauth2.AuthStyle ClientID string @@ -23,7 +23,7 @@ type clientAuthentication struct { // InjectAuthentication is used to add authentication to a Secure Token Service exchange // request. It modifies either the passed url.Values or http.Header depending on the desired // authentication format. 
-func (c *clientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { +func (c *ClientAuthentication) InjectAuthentication(values url.Values, headers http.Header) { if c.ClientID == "" || c.ClientSecret == "" || values == nil || headers == nil { return } diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go b/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go similarity index 68% rename from vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go rename to vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go index e6fcae5fc..1a0bebd15 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/sts_exchange.go +++ b/vendor/golang.org/x/oauth2/google/internal/stsexchange/sts_exchange.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package externalaccount +package stsexchange import ( "context" @@ -18,14 +18,17 @@ import ( "golang.org/x/oauth2" ) -// exchangeToken performs an oauth2 token exchange with the provided endpoint. +func defaultHeader() http.Header { + header := make(http.Header) + header.Add("Content-Type", "application/x-www-form-urlencoded") + return header +} + +// ExchangeToken performs an oauth2 token exchange with the provided endpoint. // The first 4 fields are all mandatory. headers can be used to pass additional // headers beyond the bare minimum required by the token exchange. options can // be used to pass additional JSON-structured options to the remote server. -func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchangeRequest, authentication clientAuthentication, headers http.Header, options map[string]interface{}) (*stsTokenExchangeResponse, error) { - - client := oauth2.NewClient(ctx, nil) - +func ExchangeToken(ctx context.Context, endpoint string, request *TokenExchangeRequest, authentication ClientAuthentication, headers http.Header, options map[string]interface{}) (*Response, error) { data := url.Values{} data.Set("audience", request.Audience) data.Set("grant_type", "urn:ietf:params:oauth:grant-type:token-exchange") @@ -41,13 +44,28 @@ func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchan data.Set("options", string(opts)) } + return makeRequest(ctx, endpoint, data, authentication, headers) +} + +func RefreshAccessToken(ctx context.Context, endpoint string, refreshToken string, authentication ClientAuthentication, headers http.Header) (*Response, error) { + data := url.Values{} + data.Set("grant_type", "refresh_token") + data.Set("refresh_token", refreshToken) + + return makeRequest(ctx, endpoint, data, authentication, headers) +} + +func makeRequest(ctx context.Context, endpoint string, data url.Values, authentication ClientAuthentication, headers http.Header) (*Response, error) { + if headers == nil { + headers = defaultHeader() + } + client := oauth2.NewClient(ctx, nil) authentication.InjectAuthentication(data, headers) encodedData := data.Encode() req, err := http.NewRequest("POST", endpoint, strings.NewReader(encodedData)) if err != nil { return nil, fmt.Errorf("oauth2/google: failed to properly build http request: %v", err) - } req = req.WithContext(ctx) for key, list := range headers { @@ -71,7 +89,7 @@ func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchan if c := resp.StatusCode; c < 200 || c > 299 { return nil, fmt.Errorf("oauth2/google: status code %d: %s", c, body) } - var 
stsResp stsTokenExchangeResponse + var stsResp Response err = json.Unmarshal(body, &stsResp) if err != nil { return nil, fmt.Errorf("oauth2/google: failed to unmarshal response body from Secure Token Server: %v", err) @@ -81,8 +99,8 @@ func exchangeToken(ctx context.Context, endpoint string, request *stsTokenExchan return &stsResp, nil } -// stsTokenExchangeRequest contains fields necessary to make an oauth2 token exchange. -type stsTokenExchangeRequest struct { +// TokenExchangeRequest contains fields necessary to make an oauth2 token exchange. +type TokenExchangeRequest struct { ActingParty struct { ActorToken string ActorTokenType string @@ -96,8 +114,8 @@ type stsTokenExchangeRequest struct { SubjectTokenType string } -// stsTokenExchangeResponse is used to decode the remote server response during an oauth2 token exchange. -type stsTokenExchangeResponse struct { +// Response is used to decode the remote server response during an oauth2 token exchange. +type Response struct { AccessToken string `json:"access_token"` IssuedTokenType string `json:"issued_token_type"` TokenType string `json:"token_type"` diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index 58901bda5..e83ddeef0 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -18,6 +18,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" ) @@ -115,41 +116,60 @@ const ( AuthStyleInHeader AuthStyle = 2 ) -// authStyleCache is the set of tokenURLs we've successfully used via +// LazyAuthStyleCache is a backwards compatibility compromise to let Configs +// have a lazily-initialized AuthStyleCache. +// +// The two users of this, oauth2.Config and oauth2/clientcredentials.Config, +// both would ideally just embed an unexported AuthStyleCache but because both +// were historically allowed to be copied by value we can't retroactively add an +// uncopyable Mutex to them. +// +// We could use an atomic.Pointer, but that was added recently enough (in Go +// 1.18) that we'd break Go 1.17 users where the tests as of 2023-08-03 +// still pass. By using an atomic.Value, it supports both Go 1.17 and +// copying by value, even if that's not ideal. +type LazyAuthStyleCache struct { + v atomic.Value // of *AuthStyleCache +} + +func (lc *LazyAuthStyleCache) Get() *AuthStyleCache { + if c, ok := lc.v.Load().(*AuthStyleCache); ok { + return c + } + c := new(AuthStyleCache) + if !lc.v.CompareAndSwap(nil, c) { + c = lc.v.Load().(*AuthStyleCache) + } + return c +} + +// AuthStyleCache is the set of tokenURLs we've successfully used via // RetrieveToken and which style auth we ended up using. // It's called a cache, but it doesn't (yet?) shrink. It's expected that // the set of OAuth2 servers a program contacts over time is fixed and // small. -var authStyleCache struct { - sync.Mutex - m map[string]AuthStyle // keyed by tokenURL -} - -// ResetAuthCache resets the global authentication style cache used -// for AuthStyleUnknown token requests. -func ResetAuthCache() { - authStyleCache.Lock() - defer authStyleCache.Unlock() - authStyleCache.m = nil +type AuthStyleCache struct { + mu sync.Mutex + m map[string]AuthStyle // keyed by tokenURL } // lookupAuthStyle reports which auth style we last used with tokenURL // when calling RetrieveToken and whether we have ever done so. 
-func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - style, ok = authStyleCache.m[tokenURL] +func (c *AuthStyleCache) lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { + c.mu.Lock() + defer c.mu.Unlock() + style, ok = c.m[tokenURL] return } // setAuthStyle adds an entry to authStyleCache, documented above. -func setAuthStyle(tokenURL string, v AuthStyle) { - authStyleCache.Lock() - defer authStyleCache.Unlock() - if authStyleCache.m == nil { - authStyleCache.m = make(map[string]AuthStyle) +func (c *AuthStyleCache) setAuthStyle(tokenURL string, v AuthStyle) { + c.mu.Lock() + defer c.mu.Unlock() + if c.m == nil { + c.m = make(map[string]AuthStyle) } - authStyleCache.m[tokenURL] = v + c.m[tokenURL] = v } // newTokenRequest returns a new *http.Request to retrieve a new token @@ -189,10 +209,10 @@ func cloneURLValues(v url.Values) url.Values { return v2 } -func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) { +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle, styleCache *AuthStyleCache) (*Token, error) { needsAuthStyleProbe := authStyle == 0 if needsAuthStyleProbe { - if style, ok := lookupAuthStyle(tokenURL); ok { + if style, ok := styleCache.lookupAuthStyle(tokenURL); ok { authStyle = style needsAuthStyleProbe = false } else { @@ -222,7 +242,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, token, err = doTokenRoundTrip(ctx, req) } if needsAuthStyleProbe && err == nil { - setAuthStyle(tokenURL, authStyle) + styleCache.setAuthStyle(tokenURL, authStyle) } // Don't overwrite `RefreshToken` with an empty value // if this was a token refreshing request. diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index 9085fabe3..90a2c3d6d 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -58,6 +58,10 @@ type Config struct { // Scope specifies optional requested permissions. Scopes []string + + // authStyleCache caches which auth style to use when Endpoint.AuthStyle is + // the zero value (AuthStyleAutoDetect). + authStyleCache internal.LazyAuthStyleCache } // A TokenSource is anything that can return a token. @@ -71,8 +75,9 @@ type TokenSource interface { // Endpoint represents an OAuth 2.0 provider's authorization and token // endpoint URLs. type Endpoint struct { - AuthURL string - TokenURL string + AuthURL string + DeviceAuthURL string + TokenURL string // AuthStyle optionally specifies how the endpoint wants the // client ID & client secret sent. The zero value means to @@ -139,15 +144,19 @@ func SetAuthURLParam(key, value string) AuthCodeOption { // AuthCodeURL returns a URL to OAuth 2.0 provider's consent page // that asks for permissions for the required scopes explicitly. // -// State is a token to protect the user from CSRF attacks. You must -// always provide a non-empty string and validate that it matches the -// state query parameter on your redirect callback. -// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. +// State is an opaque value used by the client to maintain state between the +// request and callback. The authorization server includes this value when +// redirecting the user agent back to the client. // // Opts may include AccessTypeOnline or AccessTypeOffline, as well // as ApprovalForce. 
-// It can also be used to pass the PKCE challenge. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +// +// To protect against CSRF attacks, opts should include a PKCE challenge +// (S256ChallengeOption). Not all servers support PKCE. An alternative is to +// generate a random state parameter and verify it after exchange. +// See https://datatracker.ietf.org/doc/html/rfc6749#section-10.12 (predating +// PKCE), https://www.oauth.com/oauth2-servers/pkce/ and +// https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-09.html#name-cross-site-request-forgery (describing both approaches) func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { var buf bytes.Buffer buf.WriteString(c.Endpoint.AuthURL) @@ -162,7 +171,6 @@ func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { v.Set("scope", strings.Join(c.Scopes, " ")) } if state != "" { - // TODO(light): Docs say never to omit state; don't allow empty. v.Set("state", state) } for _, opt := range opts { @@ -207,10 +215,11 @@ func (c *Config) PasswordCredentialsToken(ctx context.Context, username, passwor // The provided context optionally controls which HTTP client is used. See the HTTPClient variable. // // The code will be in the *http.Request.FormValue("code"). Before -// calling Exchange, be sure to validate FormValue("state"). +// calling Exchange, be sure to validate FormValue("state") if you are +// using it to protect against CSRF attacks. // -// Opts may include the PKCE verifier code if previously used in AuthCodeURL. -// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +// If using PKCE to protect against CSRF attacks, opts should include a +// VerifierOption. func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) { v := url.Values{ "grant_type": {"authorization_code"}, diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go new file mode 100644 index 000000000..50593b6df --- /dev/null +++ b/vendor/golang.org/x/oauth2/pkce.go @@ -0,0 +1,68 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package oauth2 + +import ( + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "net/url" +) + +const ( + codeChallengeKey = "code_challenge" + codeChallengeMethodKey = "code_challenge_method" + codeVerifierKey = "code_verifier" +) + +// GenerateVerifier generates a PKCE code verifier with 32 octets of randomness. +// This follows recommendations in RFC 7636. +// +// A fresh verifier should be generated for each authorization. +// S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL +// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange +// (or Config.DeviceAccessToken). +func GenerateVerifier() string { + // "RECOMMENDED that the output of a suitable random number generator be + // used to create a 32-octet sequence. The octet sequence is then + // base64url-encoded to produce a 43-octet URL-safe string to use as the + // code verifier." + // https://datatracker.ietf.org/doc/html/rfc7636#section-4.1 + data := make([]byte, 32) + if _, err := rand.Read(data); err != nil { + panic(err) + } + return base64.RawURLEncoding.EncodeToString(data) +} + +// VerifierOption returns a PKCE code verifier AuthCodeOption. It should be +// passed to Config.Exchange or Config.DeviceAccessToken only. 
+func VerifierOption(verifier string) AuthCodeOption { + return setParam{k: codeVerifierKey, v: verifier} +} + +// S256ChallengeFromVerifier returns a PKCE code challenge derived from verifier with method S256. +// +// Prefer to use S256ChallengeOption where possible. +func S256ChallengeFromVerifier(verifier string) string { + sha := sha256.Sum256([]byte(verifier)) + return base64.RawURLEncoding.EncodeToString(sha[:]) +} + +// S256ChallengeOption derives a PKCE code challenge derived from verifier with +// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess +// only. +func S256ChallengeOption(verifier string) AuthCodeOption { + return challengeOption{ + challenge_method: "S256", + challenge: S256ChallengeFromVerifier(verifier), + } +} + +type challengeOption struct{ challenge_method, challenge string } + +func (p challengeOption) setValue(m url.Values) { + m.Set(codeChallengeMethodKey, p.challenge_method) + m.Set(codeChallengeKey, p.challenge) +} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go index 5ffce9764..5bbb33217 100644 --- a/vendor/golang.org/x/oauth2/token.go +++ b/vendor/golang.org/x/oauth2/token.go @@ -164,7 +164,7 @@ func tokenFromInternal(t *internal.Token) *Token { // This token is then mapped from *internal.Token into an *oauth2.Token which is returned along // with an error.. func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { - tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle)) + tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle), c.authStyleCache.Get()) if err != nil { if rErr, ok := err.(*internal.RetrieveError); ok { return nil, (*RetrieveError)(rErr) diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 83f112c4c..4756ad5f7 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -38,7 +38,7 @@ var X86 struct { HasAVX512F bool // Advanced vector extension 512 Foundation Instructions HasAVX512CD bool // Advanced vector extension 512 Conflict Detection Instructions HasAVX512ER bool // Advanced vector extension 512 Exponential and Reciprocal Instructions - HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions Instructions + HasAVX512PF bool // Advanced vector extension 512 Prefetch Instructions HasAVX512VL bool // Advanced vector extension 512 Vector Length Extensions HasAVX512BW bool // Advanced vector extension 512 Byte and Word Instructions HasAVX512DQ bool // Advanced vector extension 512 Doubleword and Quadword Instructions @@ -54,6 +54,9 @@ var X86 struct { HasAVX512VBMI2 bool // Advanced vector extension 512 Vector Byte Manipulation Instructions 2 HasAVX512BITALG bool // Advanced vector extension 512 Bit Algorithms HasAVX512BF16 bool // Advanced vector extension 512 BFloat16 Instructions + HasAMXTile bool // Advanced Matrix Extension Tile instructions + HasAMXInt8 bool // Advanced Matrix Extension Int8 instructions + HasAMXBF16 bool // Advanced Matrix Extension BFloat16 instructions HasBMI1 bool // Bit manipulation instruction set 1 HasBMI2 bool // Bit manipulation instruction set 2 HasCX16 bool // Compare and exchange 16 Bytes diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go index bd6c128af..ff7da60eb 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go +++ 
b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -7,6 +7,6 @@ package cpu -const cacheLineSize = 32 +const cacheLineSize = 64 func initOptions() {} diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index f5aacfc82..2dcde8285 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -37,6 +37,9 @@ func initOptions() { {Name: "avx512vbmi2", Feature: &X86.HasAVX512VBMI2}, {Name: "avx512bitalg", Feature: &X86.HasAVX512BITALG}, {Name: "avx512bf16", Feature: &X86.HasAVX512BF16}, + {Name: "amxtile", Feature: &X86.HasAMXTile}, + {Name: "amxint8", Feature: &X86.HasAMXInt8}, + {Name: "amxbf16", Feature: &X86.HasAMXBF16}, {Name: "bmi1", Feature: &X86.HasBMI1}, {Name: "bmi2", Feature: &X86.HasBMI2}, {Name: "cx16", Feature: &X86.HasCX16}, @@ -138,6 +141,10 @@ func archInit() { eax71, _, _, _ := cpuid(7, 1) X86.HasAVX512BF16 = isSet(5, eax71) } + + X86.HasAMXTile = isSet(24, edx7) + X86.HasAMXInt8 = isSet(25, edx7) + X86.HasAMXBF16 = isSet(22, edx7) } func isSet(bitpos uint, value uint32) bool { diff --git a/vendor/golang.org/x/sys/cpu/hwcap_linux.go b/vendor/golang.org/x/sys/cpu/hwcap_linux.go index 1d9d91f3e..34e49f955 100644 --- a/vendor/golang.org/x/sys/cpu/hwcap_linux.go +++ b/vendor/golang.org/x/sys/cpu/hwcap_linux.go @@ -5,7 +5,7 @@ package cpu import ( - "io/ioutil" + "os" ) const ( @@ -39,7 +39,7 @@ func readHWCAP() error { return nil } - buf, err := ioutil.ReadFile(procAuxv) + buf, err := os.ReadFile(procAuxv) if err != nil { // e.g. on android /proc/self/auxv is not accessible, so silently // ignore the error and leave Initialized = false. On some diff --git a/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go b/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go deleted file mode 100644 index e07899b90..000000000 --- a/vendor/golang.org/x/sys/internal/unsafeheader/unsafeheader.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package unsafeheader contains header declarations for the Go runtime's -// slice and string implementations. -// -// This package allows x/sys to use types equivalent to -// reflect.SliceHeader and reflect.StringHeader without introducing -// a dependency on the (relatively heavy) "reflect" package. -package unsafeheader - -import ( - "unsafe" -) - -// Slice is the runtime representation of a slice. -// It cannot be used safely or portably and its representation may change in a later release. -type Slice struct { - Data unsafe.Pointer - Len int - Cap int -} - -// String is the runtime representation of a string. -// It cannot be used safely or portably and its representation may change in a later release. 
-type String struct { - Data unsafe.Pointer - Len int -} diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 8f775fafa..47fa6a7eb 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -583,6 +583,7 @@ ccflags="$@" $2 ~ /^PERF_/ || $2 ~ /^SECCOMP_MODE_/ || $2 ~ /^SEEK_/ || + $2 ~ /^SCHED_/ || $2 ~ /^SPLICE_/ || $2 ~ /^SYNC_FILE_RANGE_/ || $2 !~ /IOC_MAGIC/ && diff --git a/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/vendor/golang.org/x/sys/unix/ptrace_darwin.go index 39dba6ca6..463c3eff7 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_darwin.go +++ b/vendor/golang.org/x/sys/unix/ptrace_darwin.go @@ -7,12 +7,6 @@ package unix -import "unsafe" - func ptrace(request int, pid int, addr uintptr, data uintptr) error { return ptrace1(request, pid, addr, data) } - -func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) error { - return ptrace1Ptr(request, pid, addr, data) -} diff --git a/vendor/golang.org/x/sys/unix/ptrace_ios.go b/vendor/golang.org/x/sys/unix/ptrace_ios.go index 9ea66330a..ed0509a01 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_ios.go +++ b/vendor/golang.org/x/sys/unix/ptrace_ios.go @@ -7,12 +7,6 @@ package unix -import "unsafe" - func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { return ENOTSUP } - -func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { - return ENOTSUP -} diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 9a6e5acac..e94e6cdac 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -487,8 +487,6 @@ func Fsync(fd int) error { //sys Unlinkat(dirfd int, path string, flags int) (err error) //sys Ustat(dev int, ubuf *Ustat_t) (err error) //sys write(fd int, p []byte) (n int, err error) -//sys readlen(fd int, p *byte, np int) (n int, err error) = read -//sys writelen(fd int, p *byte, np int) (n int, err error) = write //sys Dup2(oldfd int, newfd int) (err error) //sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = posix_fadvise64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 135cc3cd7..59542a897 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -644,189 +644,3 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE - -/* - * Unimplemented - */ -// Profil -// Sigaction -// Sigprocmask -// Getlogin -// Sigpending -// Sigaltstack -// Ioctl -// Reboot -// Execve -// Vfork -// Sbrk -// Sstk -// Ovadvise -// Mincore -// Setitimer -// Swapon -// Select -// Sigsuspend -// Readv -// Writev -// Nfssvc -// Getfh -// Quotactl -// Csops -// Waitid -// Add_profil -// Kdebug_trace -// Sigreturn -// Atsocket -// Kqueue_from_portset_np -// Kqueue_portset -// Getattrlist -// Getdirentriesattr -// Searchfs -// Delete -// Copyfile -// Watchevent -// Waitevent -// Modwatch -// Fsctl -// Initgroups -// Posix_spawn -// Nfsclnt -// Fhopen -// Minherit -// Semsys -// Msgsys -// 
Shmsys -// Semctl -// Semget -// Semop -// Msgctl -// Msgget -// Msgsnd -// Msgrcv -// Shm_open -// Shm_unlink -// Sem_open -// Sem_close -// Sem_unlink -// Sem_wait -// Sem_trywait -// Sem_post -// Sem_getvalue -// Sem_init -// Sem_destroy -// Open_extended -// Umask_extended -// Stat_extended -// Lstat_extended -// Fstat_extended -// Chmod_extended -// Fchmod_extended -// Access_extended -// Settid -// Gettid -// Setsgroups -// Getsgroups -// Setwgroups -// Getwgroups -// Mkfifo_extended -// Mkdir_extended -// Identitysvc -// Shared_region_check_np -// Shared_region_map_np -// __pthread_mutex_destroy -// __pthread_mutex_init -// __pthread_mutex_lock -// __pthread_mutex_trylock -// __pthread_mutex_unlock -// __pthread_cond_init -// __pthread_cond_destroy -// __pthread_cond_broadcast -// __pthread_cond_signal -// Setsid_with_pid -// __pthread_cond_timedwait -// Aio_fsync -// Aio_return -// Aio_suspend -// Aio_cancel -// Aio_error -// Aio_read -// Aio_write -// Lio_listio -// __pthread_cond_wait -// Iopolicysys -// __pthread_kill -// __pthread_sigmask -// __sigwait -// __disable_threadsignal -// __pthread_markcancel -// __pthread_canceled -// __semwait_signal -// Proc_info -// sendfile -// Stat64_extended -// Lstat64_extended -// Fstat64_extended -// __pthread_chdir -// __pthread_fchdir -// Audit -// Auditon -// Getauid -// Setauid -// Getaudit -// Setaudit -// Getaudit_addr -// Setaudit_addr -// Auditctl -// Bsdthread_create -// Bsdthread_terminate -// Stack_snapshot -// Bsdthread_register -// Workq_open -// Workq_ops -// __mac_execve -// __mac_syscall -// __mac_get_file -// __mac_set_file -// __mac_get_link -// __mac_set_link -// __mac_get_proc -// __mac_set_proc -// __mac_get_fd -// __mac_set_fd -// __mac_get_pid -// __mac_get_lcid -// __mac_get_lctx -// __mac_set_lctx -// Setlcid -// Read_nocancel -// Write_nocancel -// Open_nocancel -// Close_nocancel -// Wait4_nocancel -// Recvmsg_nocancel -// Sendmsg_nocancel -// Recvfrom_nocancel -// Accept_nocancel -// Fcntl_nocancel -// Select_nocancel -// Fsync_nocancel -// Connect_nocancel -// Sigsuspend_nocancel -// Readv_nocancel -// Writev_nocancel -// Sendto_nocancel -// Pread_nocancel -// Pwrite_nocancel -// Waitid_nocancel -// Poll_nocancel -// Msgsnd_nocancel -// Msgrcv_nocancel -// Sem_wait_nocancel -// Aio_suspend_nocancel -// __sigwait_nocancel -// __semwait_signal_nocancel -// __mac_mount -// __mac_get_mount -// __mac_getfsstat diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index 9fa879806..b37310ce9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -47,6 +47,5 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace -//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index f17b8c526..d51ec9963 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go 
@@ -47,6 +47,5 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT //sys Lstat(path string, stat *Stat_t) (err error) //sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace -//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index d4ce988e7..97cb916f2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -343,203 +343,5 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) - -/* - * Unimplemented - * TODO(jsing): Update this list for DragonFly. - */ -// Profil -// Sigaction -// Sigprocmask -// Getlogin -// Sigpending -// Sigaltstack -// Reboot -// Execve -// Vfork -// Sbrk -// Sstk -// Ovadvise -// Mincore -// Setitimer -// Swapon -// Select -// Sigsuspend -// Readv -// Writev -// Nfssvc -// Getfh -// Quotactl -// Mount -// Csops -// Waitid -// Add_profil -// Kdebug_trace -// Sigreturn -// Atsocket -// Kqueue_from_portset_np -// Kqueue_portset -// Getattrlist -// Setattrlist -// Getdirentriesattr -// Searchfs -// Delete -// Copyfile -// Watchevent -// Waitevent -// Modwatch -// Getxattr -// Fgetxattr -// Setxattr -// Fsetxattr -// Removexattr -// Fremovexattr -// Listxattr -// Flistxattr -// Fsctl -// Initgroups -// Posix_spawn -// Nfsclnt -// Fhopen -// Minherit -// Semsys -// Msgsys -// Shmsys -// Semctl -// Semget -// Semop -// Msgctl -// Msgget -// Msgsnd -// Msgrcv -// Shmat -// Shmctl -// Shmdt -// Shmget -// Shm_open -// Shm_unlink -// Sem_open -// Sem_close -// Sem_unlink -// Sem_wait -// Sem_trywait -// Sem_post -// Sem_getvalue -// Sem_init -// Sem_destroy -// Open_extended -// Umask_extended -// Stat_extended -// Lstat_extended -// Fstat_extended -// Chmod_extended -// Fchmod_extended -// Access_extended -// Settid -// Gettid -// Setsgroups -// Getsgroups -// Setwgroups -// Getwgroups -// Mkfifo_extended -// Mkdir_extended -// Identitysvc -// Shared_region_check_np -// Shared_region_map_np -// __pthread_mutex_destroy -// __pthread_mutex_init -// __pthread_mutex_lock -// __pthread_mutex_trylock -// __pthread_mutex_unlock -// __pthread_cond_init -// __pthread_cond_destroy -// __pthread_cond_broadcast -// __pthread_cond_signal -// Setsid_with_pid -// __pthread_cond_timedwait -// Aio_fsync -// Aio_return -// Aio_suspend -// Aio_cancel -// Aio_error -// Aio_read -// Aio_write -// Lio_listio -// __pthread_cond_wait -// Iopolicysys -// __pthread_kill -// __pthread_sigmask -// __sigwait -// __disable_threadsignal -// __pthread_markcancel -// __pthread_canceled -// __semwait_signal -// Proc_info -// Stat64_extended -// Lstat64_extended -// 
Fstat64_extended -// __pthread_chdir -// __pthread_fchdir -// Audit -// Auditon -// Getauid -// Setauid -// Getaudit -// Setaudit -// Getaudit_addr -// Setaudit_addr -// Auditctl -// Bsdthread_create -// Bsdthread_terminate -// Stack_snapshot -// Bsdthread_register -// Workq_open -// Workq_ops -// __mac_execve -// __mac_syscall -// __mac_get_file -// __mac_set_file -// __mac_get_link -// __mac_set_link -// __mac_get_proc -// __mac_set_proc -// __mac_get_fd -// __mac_set_fd -// __mac_get_pid -// __mac_get_lcid -// __mac_get_lctx -// __mac_set_lctx -// Setlcid -// Read_nocancel -// Write_nocancel -// Open_nocancel -// Close_nocancel -// Wait4_nocancel -// Recvmsg_nocancel -// Sendmsg_nocancel -// Recvfrom_nocancel -// Accept_nocancel -// Fcntl_nocancel -// Select_nocancel -// Fsync_nocancel -// Connect_nocancel -// Sigsuspend_nocancel -// Readv_nocancel -// Writev_nocancel -// Sendto_nocancel -// Pread_nocancel -// Pwrite_nocancel -// Waitid_nocancel -// Msgsnd_nocancel -// Msgrcv_nocancel -// Sem_wait_nocancel -// Aio_suspend_nocancel -// __sigwait_nocancel -// __semwait_signal_nocancel -// __mac_mount -// __mac_get_mount -// __mac_getfsstat diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index afb10106f..64d1bb4db 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -449,197 +449,5 @@ func Dup3(oldfd, newfd, flags int) error { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) - -/* - * Unimplemented - */ -// Profil -// Sigaction -// Sigprocmask -// Getlogin -// Sigpending -// Sigaltstack -// Ioctl -// Reboot -// Execve -// Vfork -// Sbrk -// Sstk -// Ovadvise -// Mincore -// Setitimer -// Swapon -// Select -// Sigsuspend -// Readv -// Writev -// Nfssvc -// Getfh -// Quotactl -// Mount -// Csops -// Waitid -// Add_profil -// Kdebug_trace -// Sigreturn -// Atsocket -// Kqueue_from_portset_np -// Kqueue_portset -// Getattrlist -// Setattrlist -// Getdents -// Getdirentriesattr -// Searchfs -// Delete -// Copyfile -// Watchevent -// Waitevent -// Modwatch -// Fsctl -// Initgroups -// Posix_spawn -// Nfsclnt -// Fhopen -// Minherit -// Semsys -// Msgsys -// Shmsys -// Semctl -// Semget -// Semop -// Msgctl -// Msgget -// Msgsnd -// Msgrcv -// Shmat -// Shmctl -// Shmdt -// Shmget -// Shm_open -// Shm_unlink -// Sem_open -// Sem_close -// Sem_unlink -// Sem_wait -// Sem_trywait -// Sem_post -// Sem_getvalue -// Sem_init -// Sem_destroy -// Open_extended -// Umask_extended -// Stat_extended -// Lstat_extended -// Fstat_extended -// Chmod_extended -// Fchmod_extended -// Access_extended -// Settid -// Gettid -// Setsgroups -// Getsgroups -// Setwgroups -// Getwgroups -// Mkfifo_extended -// Mkdir_extended -// Identitysvc -// Shared_region_check_np -// Shared_region_map_np -// __pthread_mutex_destroy -// __pthread_mutex_init -// __pthread_mutex_lock -// __pthread_mutex_trylock -// __pthread_mutex_unlock -// __pthread_cond_init -// __pthread_cond_destroy -// __pthread_cond_broadcast -// 
__pthread_cond_signal -// Setsid_with_pid -// __pthread_cond_timedwait -// Aio_fsync -// Aio_return -// Aio_suspend -// Aio_cancel -// Aio_error -// Aio_read -// Aio_write -// Lio_listio -// __pthread_cond_wait -// Iopolicysys -// __pthread_kill -// __pthread_sigmask -// __sigwait -// __disable_threadsignal -// __pthread_markcancel -// __pthread_canceled -// __semwait_signal -// Proc_info -// Stat64_extended -// Lstat64_extended -// Fstat64_extended -// __pthread_chdir -// __pthread_fchdir -// Audit -// Auditon -// Getauid -// Setauid -// Getaudit -// Setaudit -// Getaudit_addr -// Setaudit_addr -// Auditctl -// Bsdthread_create -// Bsdthread_terminate -// Stack_snapshot -// Bsdthread_register -// Workq_open -// Workq_ops -// __mac_execve -// __mac_syscall -// __mac_get_file -// __mac_set_file -// __mac_get_link -// __mac_set_link -// __mac_get_proc -// __mac_set_proc -// __mac_get_fd -// __mac_set_fd -// __mac_get_pid -// __mac_get_lcid -// __mac_get_lctx -// __mac_set_lctx -// Setlcid -// Read_nocancel -// Write_nocancel -// Open_nocancel -// Close_nocancel -// Wait4_nocancel -// Recvmsg_nocancel -// Sendmsg_nocancel -// Recvfrom_nocancel -// Accept_nocancel -// Fcntl_nocancel -// Select_nocancel -// Fsync_nocancel -// Connect_nocancel -// Sigsuspend_nocancel -// Readv_nocancel -// Writev_nocancel -// Sendto_nocancel -// Pread_nocancel -// Pwrite_nocancel -// Waitid_nocancel -// Poll_nocancel -// Msgsnd_nocancel -// Msgrcv_nocancel -// Sem_wait_nocancel -// Aio_suspend_nocancel -// __sigwait_nocancel -// __semwait_signal_nocancel -// __mac_mount -// __mac_get_mount -// __mac_getfsstat diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index a730878e4..fb4e50224 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -693,10 +693,10 @@ type SockaddrALG struct { func (sa *SockaddrALG) sockaddr() (unsafe.Pointer, _Socklen, error) { // Leave room for NUL byte terminator. 
- if len(sa.Type) > 13 { + if len(sa.Type) > len(sa.raw.Type)-1 { return nil, 0, EINVAL } - if len(sa.Name) > 63 { + if len(sa.Name) > len(sa.raw.Name)-1 { return nil, 0, EINVAL } @@ -704,17 +704,8 @@ func (sa *SockaddrALG) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Feat = sa.Feature sa.raw.Mask = sa.Mask - typ, err := ByteSliceFromString(sa.Type) - if err != nil { - return nil, 0, err - } - name, err := ByteSliceFromString(sa.Name) - if err != nil { - return nil, 0, err - } - - copy(sa.raw.Type[:], typ) - copy(sa.raw.Name[:], name) + copy(sa.raw.Type[:], sa.Type) + copy(sa.raw.Name[:], sa.Name) return unsafe.Pointer(&sa.raw), SizeofSockaddrALG, nil } @@ -1988,8 +1979,6 @@ func Signalfd(fd int, sigmask *Sigset_t, flags int) (newfd int, err error) { //sys Unshare(flags int) (err error) //sys write(fd int, p []byte) (n int, err error) //sys exitThread(code int) (err error) = SYS_EXIT -//sys readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ -//sys writelen(fd int, p *byte, np int) (n int, err error) = SYS_WRITE //sys readv(fd int, iovs []Iovec) (n int, err error) = SYS_READV //sys writev(fd int, iovs []Iovec) (n int, err error) = SYS_WRITEV //sys preadv(fd int, iovs []Iovec, offs_l uintptr, offs_h uintptr) (n int, err error) = SYS_PREADV @@ -2471,98 +2460,25 @@ func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask * return pselect6(nfd, r, w, e, mutableTimeout, kernelMask) } -/* - * Unimplemented - */ -// AfsSyscall -// ArchPrctl -// Brk -// ClockNanosleep -// ClockSettime -// Clone -// EpollCtlOld -// EpollPwait -// EpollWaitOld -// Execve -// Fork -// Futex -// GetKernelSyms -// GetMempolicy -// GetRobustList -// GetThreadArea -// Getpmsg -// IoCancel -// IoDestroy -// IoGetevents -// IoSetup -// IoSubmit -// IoprioGet -// IoprioSet -// KexecLoad -// LookupDcookie -// Mbind -// MigratePages -// Mincore -// ModifyLdt -// Mount -// MovePages -// MqGetsetattr -// MqNotify -// MqOpen -// MqTimedreceive -// MqTimedsend -// MqUnlink -// Msgctl -// Msgget -// Msgrcv -// Msgsnd -// Nfsservctl -// Personality -// Pselect6 -// Ptrace -// Putpmsg -// Quotactl -// Readahead -// Readv -// RemapFilePages -// RestartSyscall -// RtSigaction -// RtSigpending -// RtSigqueueinfo -// RtSigreturn -// RtSigsuspend -// RtSigtimedwait -// SchedGetPriorityMax -// SchedGetPriorityMin -// SchedGetparam -// SchedGetscheduler -// SchedRrGetInterval -// SchedSetparam -// SchedYield -// Security -// Semctl -// Semget -// Semop -// Semtimedop -// SetMempolicy -// SetRobustList -// SetThreadArea -// SetTidAddress -// Sigaltstack -// Swapoff -// Swapon -// Sysfs -// TimerCreate -// TimerDelete -// TimerGetoverrun -// TimerGettime -// TimerSettime -// Tkill (obsolete) -// Tuxcall -// Umount2 -// Uselib -// Utimensat -// Vfork -// Vhangup -// Vserver -// _Sysctl +//sys schedSetattr(pid int, attr *SchedAttr, flags uint) (err error) +//sys schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) + +// SchedSetAttr is a wrapper for sched_setattr(2) syscall. +// https://man7.org/linux/man-pages/man2/sched_setattr.2.html +func SchedSetAttr(pid int, attr *SchedAttr, flags uint) error { + if attr == nil { + return EINVAL + } + attr.Size = SizeofSchedAttr + return schedSetattr(pid, attr, flags) +} + +// SchedGetAttr is a wrapper for sched_getattr(2) syscall. 
+// https://man7.org/linux/man-pages/man2/sched_getattr.2.html +func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) { + attr := &SchedAttr{} + if err := schedGetattr(pid, attr, SizeofSchedAttr, flags); err != nil { + return nil, err + } + return attr, nil +} diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index ddd1ac853..88162099a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -356,8 +356,6 @@ func Statvfs(path string, buf *Statvfs_t) (err error) { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) const ( @@ -371,262 +369,3 @@ const ( func mremap(oldaddr uintptr, oldlength uintptr, newlength uintptr, flags int, newaddr uintptr) (uintptr, error) { return mremapNetBSD(oldaddr, oldlength, newaddr, newlength, flags) } - -/* - * Unimplemented - */ -// ____semctl13 -// __clone -// __fhopen40 -// __fhstat40 -// __fhstatvfs140 -// __fstat30 -// __getcwd -// __getfh30 -// __getlogin -// __lstat30 -// __mount50 -// __msgctl13 -// __msync13 -// __ntp_gettime30 -// __posix_chown -// __posix_fchown -// __posix_lchown -// __posix_rename -// __setlogin -// __shmctl13 -// __sigaction_sigtramp -// __sigaltstack14 -// __sigpending14 -// __sigprocmask14 -// __sigsuspend14 -// __sigtimedwait -// __stat30 -// __syscall -// __vfork14 -// _ksem_close -// _ksem_destroy -// _ksem_getvalue -// _ksem_init -// _ksem_open -// _ksem_post -// _ksem_trywait -// _ksem_unlink -// _ksem_wait -// _lwp_continue -// _lwp_create -// _lwp_ctl -// _lwp_detach -// _lwp_exit -// _lwp_getname -// _lwp_getprivate -// _lwp_kill -// _lwp_park -// _lwp_self -// _lwp_setname -// _lwp_setprivate -// _lwp_suspend -// _lwp_unpark -// _lwp_unpark_all -// _lwp_wait -// _lwp_wakeup -// _pset_bind -// _sched_getaffinity -// _sched_getparam -// _sched_setaffinity -// _sched_setparam -// acct -// aio_cancel -// aio_error -// aio_fsync -// aio_read -// aio_return -// aio_suspend -// aio_write -// break -// clock_getres -// clock_gettime -// clock_settime -// compat_09_ogetdomainname -// compat_09_osetdomainname -// compat_09_ouname -// compat_10_omsgsys -// compat_10_osemsys -// compat_10_oshmsys -// compat_12_fstat12 -// compat_12_getdirentries -// compat_12_lstat12 -// compat_12_msync -// compat_12_oreboot -// compat_12_oswapon -// compat_12_stat12 -// compat_13_sigaction13 -// compat_13_sigaltstack13 -// compat_13_sigpending13 -// compat_13_sigprocmask13 -// compat_13_sigreturn13 -// compat_13_sigsuspend13 -// compat_14___semctl -// compat_14_msgctl -// compat_14_shmctl -// compat_16___sigaction14 -// compat_16___sigreturn14 -// compat_20_fhstatfs -// compat_20_fstatfs -// compat_20_getfsstat -// compat_20_statfs -// compat_30___fhstat30 -// compat_30___fstat13 -// compat_30___lstat13 -// compat_30___stat13 -// compat_30_fhopen -// compat_30_fhstat -// compat_30_fhstatvfs1 -// compat_30_getdents -// compat_30_getfh -// compat_30_ntp_gettime -// compat_30_socket -// compat_40_mount -// compat_43_fstat43 -// compat_43_lstat43 -// compat_43_oaccept -// compat_43_ocreat -// compat_43_oftruncate -// compat_43_ogetdirentries -// 
compat_43_ogetdtablesize -// compat_43_ogethostid -// compat_43_ogethostname -// compat_43_ogetkerninfo -// compat_43_ogetpagesize -// compat_43_ogetpeername -// compat_43_ogetrlimit -// compat_43_ogetsockname -// compat_43_okillpg -// compat_43_olseek -// compat_43_ommap -// compat_43_oquota -// compat_43_orecv -// compat_43_orecvfrom -// compat_43_orecvmsg -// compat_43_osend -// compat_43_osendmsg -// compat_43_osethostid -// compat_43_osethostname -// compat_43_osigblock -// compat_43_osigsetmask -// compat_43_osigstack -// compat_43_osigvec -// compat_43_otruncate -// compat_43_owait -// compat_43_stat43 -// execve -// extattr_delete_fd -// extattr_delete_file -// extattr_delete_link -// extattr_get_fd -// extattr_get_file -// extattr_get_link -// extattr_list_fd -// extattr_list_file -// extattr_list_link -// extattr_set_fd -// extattr_set_file -// extattr_set_link -// extattrctl -// fchroot -// fdatasync -// fgetxattr -// fktrace -// flistxattr -// fork -// fremovexattr -// fsetxattr -// fstatvfs1 -// fsync_range -// getcontext -// getitimer -// getvfsstat -// getxattr -// ktrace -// lchflags -// lchmod -// lfs_bmapv -// lfs_markv -// lfs_segclean -// lfs_segwait -// lgetxattr -// lio_listio -// listxattr -// llistxattr -// lremovexattr -// lseek -// lsetxattr -// lutimes -// madvise -// mincore -// minherit -// modctl -// mq_close -// mq_getattr -// mq_notify -// mq_open -// mq_receive -// mq_send -// mq_setattr -// mq_timedreceive -// mq_timedsend -// mq_unlink -// msgget -// msgrcv -// msgsnd -// nfssvc -// ntp_adjtime -// pmc_control -// pmc_get_info -// pollts -// preadv -// profil -// pselect -// pset_assign -// pset_create -// pset_destroy -// ptrace -// pwritev -// quotactl -// rasctl -// readv -// reboot -// removexattr -// sa_enable -// sa_preempt -// sa_register -// sa_setconcurrency -// sa_stacks -// sa_yield -// sbrk -// sched_yield -// semconfig -// semget -// semop -// setcontext -// setitimer -// setxattr -// shmat -// shmdt -// shmget -// sstk -// statvfs1 -// swapctl -// sysarch -// syscall -// timer_create -// timer_delete -// timer_getoverrun -// timer_gettime -// timer_settime -// undelete -// utrace -// uuidgen -// vadvise -// vfork -// writev diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index c5f166a11..6f34479b5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -326,78 +326,4 @@ func Uname(uname *Utsname) error { //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ -//sys writelen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_WRITE //sys utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) - -/* - * Unimplemented - */ -// __getcwd -// __semctl -// __syscall -// __sysctl -// adjfreq -// break -// clock_getres -// clock_gettime -// clock_settime -// closefrom -// execve -// fhopen -// fhstat -// fhstatfs -// fork -// futimens -// getfh -// getgid -// getitimer -// getlogin -// getthrid -// ktrace -// lfs_bmapv -// lfs_markv -// lfs_segclean -// lfs_segwait -// mincore -// minherit -// mount -// mquery -// msgctl -// msgget -// msgrcv -// msgsnd -// nfssvc -// nnpfspioctl -// preadv -// profil -// pwritev -// quotactl -// readv -// reboot -// renameat -// rfork -// 
sched_yield -// semget -// semop -// setgroups -// setitimer -// setsockopt -// shmat -// shmctl -// shmdt -// shmget -// sigaction -// sigaltstack -// sigpending -// sigprocmask -// sigreturn -// sigsuspend -// sysarch -// syscall -// threxit -// thrsigdivert -// thrsleep -// thrwakeup -// vfork -// writev diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 72d23575f..b99cfa134 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -698,24 +698,6 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) = libsocket.setsockopt //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) = libsocket.recvfrom -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procread)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0) - n = int(r0) - if e1 != 0 { - err = e1 - } - return -} - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwrite)), 3, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf), 0, 0, 0) - n = int(r0) - if e1 != 0 { - err = e1 - } - return -} - // Event Ports type fileObjCookie struct { diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 8bb30e7ce..f6eda2705 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -549,6 +549,9 @@ func SetNonblock(fd int, nonblocking bool) (err error) { if err != nil { return err } + if (flag&O_NONBLOCK != 0) == nonblocking { + return nil + } if nonblocking { flag |= O_NONBLOCK } else { diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index 44e72edb4..4596d041c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -192,7 +192,6 @@ func (cmsg *Cmsghdr) SetLen(length int) { //sys fcntl(fd int, cmd int, arg int) (val int, err error) //sys read(fd int, p []byte) (n int, err error) -//sys readlen(fd int, buf *byte, nbuf int) (n int, err error) = SYS_READ //sys write(fd int, p []byte) (n int, err error) //sys accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) = SYS___ACCEPT_A diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 3784f402e..f9c7f479b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -2421,6 +2421,15 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_RISCV_V_GET_CONTROL = 0x46 + PR_RISCV_V_SET_CONTROL = 0x45 + PR_RISCV_V_VSTATE_CTRL_CUR_MASK = 0x3 + PR_RISCV_V_VSTATE_CTRL_DEFAULT = 0x0 + PR_RISCV_V_VSTATE_CTRL_INHERIT = 0x10 + PR_RISCV_V_VSTATE_CTRL_MASK = 0x1f + PR_RISCV_V_VSTATE_CTRL_NEXT_MASK = 0xc + PR_RISCV_V_VSTATE_CTRL_OFF = 0x1 + PR_RISCV_V_VSTATE_CTRL_ON = 0x2 PR_SCHED_CORE = 0x3e PR_SCHED_CORE_CREATE = 0x1 PR_SCHED_CORE_GET = 0x0 @@ -2821,6 +2830,23 @@ const ( RWF_SUPPORTED = 0x1f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 + SCHED_BATCH = 0x3 + SCHED_DEADLINE = 0x6 + SCHED_FIFO = 0x1 + SCHED_FLAG_ALL = 0x7f + SCHED_FLAG_DL_OVERRUN = 0x4 + 
SCHED_FLAG_KEEP_ALL = 0x18 + SCHED_FLAG_KEEP_PARAMS = 0x10 + SCHED_FLAG_KEEP_POLICY = 0x8 + SCHED_FLAG_RECLAIM = 0x2 + SCHED_FLAG_RESET_ON_FORK = 0x1 + SCHED_FLAG_UTIL_CLAMP = 0x60 + SCHED_FLAG_UTIL_CLAMP_MAX = 0x40 + SCHED_FLAG_UTIL_CLAMP_MIN = 0x20 + SCHED_IDLE = 0x5 + SCHED_NORMAL = 0x0 + SCHED_RESET_ON_FORK = 0x40000000 + SCHED_RR = 0x2 SCM_CREDENTIALS = 0x2 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x1d diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index cfb143001..30aee00a5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -326,10 +326,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index df64f2d59..8ebfa5127 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -327,10 +327,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 3025cd5b2..271a21cdc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -333,10 +333,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 09e1ffbef..910c330a3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -323,10 +323,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index a45723540..a640798c9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -118,6 +118,8 @@ const ( IUCLC = 0x200 IXOFF = 0x1000 IXON = 0x400 + LASX_CTX_MAGIC = 0x41535801 + LSX_CTX_MAGIC = 0x53580001 MAP_ANON = 0x20 MAP_ANONYMOUS = 0x20 MAP_DENYWRITE = 0x800 @@ -317,10 +319,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index fee7dfb81..0d5925d34 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -326,10 
+326,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index a5b2373ae..d72a00e0b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -326,10 +326,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 5dde82c98..02ba129f8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -326,10 +326,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 2e80ea6b3..8daa6dd96 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -326,10 +326,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x1028 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index a65dcd7cb..63c8fa2f7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -381,10 +381,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index cbd34e3d8..930799ec1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -385,10 +385,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index e4afa7a31..8605a7dd7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -385,10 +385,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 44f45a039..95a016f1c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -314,10 +314,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 74733e260..1ae0108f5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -389,10 +389,12 @@ const ( SO_NOFCS = 0x2b SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 + SO_PASSPIDFD = 0x4c SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 SO_PEERGROUPS = 0x3b + SO_PEERPIDFD = 0x4d SO_PEERSEC = 0x1f SO_PREFER_BUSY_POLL = 0x45 SO_PROTOCOL = 0x26 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index f5f3934b1..1bb7c6333 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -428,10 +428,12 @@ const ( SO_NOFCS = 0x27 SO_OOBINLINE = 0x100 SO_PASSCRED = 0x2 + SO_PASSPIDFD = 0x55 SO_PASSSEC = 0x1f SO_PEEK_OFF = 0x26 SO_PEERCRED = 0x40 SO_PEERGROUPS = 0x3d + SO_PEERPIDFD = 0x56 SO_PEERSEC = 0x1e SO_PREFER_BUSY_POLL = 0x48 SO_PROTOCOL = 0x1028 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go index 9a257219d..d1d1d2331 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go @@ -817,28 +817,6 @@ func write(fd int, p []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, er := C.read(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(p))), C.size_t(np)) - n = int(r0) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, er := C.write(C.int(fd), C.uintptr_t(uintptr(unsafe.Pointer(p))), C.size_t(np)) - n = int(r0) - if r0 == -1 && er != nil { - err = er - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Dup2(oldfd int, newfd int) (err error) { r0, er := C.dup2(C.int(oldfd), C.int(newfd)) if r0 == -1 && er != nil { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go index 6de80c20c..f99a18adc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_aix_ppc64.go @@ -762,28 +762,6 @@ func write(fd int, p []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, e1 := callread(fd, uintptr(unsafe.Pointer(p)), np) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, e1 := callwrite(fd, uintptr(unsafe.Pointer(p)), np) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Dup2(oldfd int, newfd int) (err error) { _, e1 := calldup2(oldfd, newfd) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 4037ccf7a..1cad561e9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -725,6 +725,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -733,10 +739,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2410,28 +2412,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(libc_fstat64_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { @@ -2521,14 +2501,6 @@ func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) { return } -func ptrace1Ptr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { - _, _, e1 := syscall_syscall6(libc_ptrace_trampoline_addr, uintptr(request), uintptr(pid), addr, uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - var libc_ptrace_trampoline_addr uintptr //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 4baaed0bc..8b8bb2840 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -5,703 +5,586 @@ TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fdopendir(SB) - GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fdopendir_trampoline_addr(SB)/8, $libc_fdopendir_trampoline<>(SB) TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) - GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) - GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgroups_trampoline_addr(SB)/8, 
$libc_setgroups_trampoline<>(SB) TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) - GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_accept(SB) - GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_bind(SB) - GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_connect(SB) - GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socket(SB) - GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) - GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) - GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) - GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) - GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) - GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) - GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) - GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) - GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) - GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) - GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) - GLOBL ·libc_kevent_trampoline_addr(SB), RODATA, $8 DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) - GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) - GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 DATA 
·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) - GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) - GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) - GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) - GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) - GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) - GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) - GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) - GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_closedir(SB) - GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readdir_r(SB) - GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) - GLOBL ·libc_pipe_trampoline_addr(SB), RODATA, $8 DATA ·libc_pipe_trampoline_addr(SB)/8, $libc_pipe_trampoline<>(SB) TEXT libc_getxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getxattr(SB) - GLOBL ·libc_getxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_getxattr_trampoline_addr(SB)/8, $libc_getxattr_trampoline<>(SB) TEXT libc_fgetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fgetxattr(SB) - GLOBL ·libc_fgetxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fgetxattr_trampoline_addr(SB)/8, $libc_fgetxattr_trampoline<>(SB) TEXT libc_setxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setxattr(SB) - GLOBL ·libc_setxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_setxattr_trampoline_addr(SB)/8, $libc_setxattr_trampoline<>(SB) TEXT libc_fsetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsetxattr(SB) - GLOBL ·libc_fsetxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsetxattr_trampoline_addr(SB)/8, $libc_fsetxattr_trampoline<>(SB) TEXT libc_removexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_removexattr(SB) - GLOBL ·libc_removexattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_removexattr_trampoline_addr(SB)/8, $libc_removexattr_trampoline<>(SB) TEXT libc_fremovexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fremovexattr(SB) - GLOBL ·libc_fremovexattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fremovexattr_trampoline_addr(SB)/8, $libc_fremovexattr_trampoline<>(SB) TEXT libc_listxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listxattr(SB) - GLOBL 
·libc_listxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_listxattr_trampoline_addr(SB)/8, $libc_listxattr_trampoline<>(SB) TEXT libc_flistxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) - GLOBL ·libc_flistxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_flistxattr_trampoline_addr(SB)/8, $libc_flistxattr_trampoline<>(SB) TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) - GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) - GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) - GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) - GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) - GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) - GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendfile_trampoline_addr(SB)/8, $libc_sendfile_trampoline<>(SB) TEXT libc_shmat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmat(SB) - GLOBL ·libc_shmat_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmat_trampoline_addr(SB)/8, $libc_shmat_trampoline<>(SB) TEXT libc_shmctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmctl(SB) - GLOBL ·libc_shmctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmctl_trampoline_addr(SB)/8, $libc_shmctl_trampoline<>(SB) TEXT libc_shmdt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmdt(SB) - GLOBL ·libc_shmdt_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmdt_trampoline_addr(SB)/8, $libc_shmdt_trampoline<>(SB) TEXT libc_shmget_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmget(SB) - GLOBL ·libc_shmget_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmget_trampoline_addr(SB)/8, $libc_shmget_trampoline<>(SB) TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) - GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) - GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) - GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) - GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) - GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) - GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) - GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 DATA ·libc_chroot_trampoline_addr(SB)/8, 
$libc_chroot_trampoline<>(SB) TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clock_gettime(SB) - GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) - GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) TEXT libc_clonefile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefile(SB) - GLOBL ·libc_clonefile_trampoline_addr(SB), RODATA, $8 DATA ·libc_clonefile_trampoline_addr(SB)/8, $libc_clonefile_trampoline<>(SB) TEXT libc_clonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefileat(SB) - GLOBL ·libc_clonefileat_trampoline_addr(SB), RODATA, $8 DATA ·libc_clonefileat_trampoline_addr(SB)/8, $libc_clonefileat_trampoline<>(SB) TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) - GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) - GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) TEXT libc_exchangedata_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exchangedata(SB) - GLOBL ·libc_exchangedata_trampoline_addr(SB), RODATA, $8 DATA ·libc_exchangedata_trampoline_addr(SB)/8, $libc_exchangedata_trampoline<>(SB) TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exit(SB) - GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) - GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) - GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) - GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) - GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) - GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) - GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) - GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) TEXT libc_fclonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fclonefileat(SB) - GLOBL ·libc_fclonefileat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fclonefileat_trampoline_addr(SB)/8, $libc_fclonefileat_trampoline<>(SB) TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) - GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) - GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 DATA 
·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) - GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) - GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) - GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) TEXT libc_getdtablesize_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) - GLOBL ·libc_getdtablesize_trampoline_addr(SB), RODATA, $8 DATA ·libc_getdtablesize_trampoline_addr(SB)/8, $libc_getdtablesize_trampoline<>(SB) TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) - GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) - GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) - GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) - GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) - GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) - GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) - GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) - GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) - GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) - GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) - GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) - GLOBL ·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) - GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) - GLOBL 
·libc_issetugid_trampoline_addr(SB), RODATA, $8 DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) - GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) - GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) - GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) - GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) - GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) - GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) - GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) - GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) - GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mount(SB) - GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) - GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) - GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) - GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) - GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) - GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) - GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) - GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) - GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlinkat_trampoline_addr(SB)/8, 
$libc_readlinkat_trampoline<>(SB) TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) - GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_renameat(SB) - GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) - GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) - GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) - GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) - GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) @@ -712,192 +595,160 @@ DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) - GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) - GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) - GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) - GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) - GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) - GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) TEXT libc_setprivexec_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setprivexec(SB) - GLOBL ·libc_setprivexec_trampoline_addr(SB), RODATA, $8 DATA ·libc_setprivexec_trampoline_addr(SB)/8, $libc_setprivexec_trampoline<>(SB) TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) - GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) - GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) - GLOBL ·libc_setsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) - GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) TEXT 
libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) - GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) - GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) - GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) - GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) - GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_umask(SB) - GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) TEXT libc_undelete_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_undelete(SB) - GLOBL ·libc_undelete_trampoline_addr(SB), RODATA, $8 DATA ·libc_undelete_trampoline_addr(SB)/8, $libc_undelete_trampoline<>(SB) TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) - GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) - GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) - GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) - GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) - GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) - GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) TEXT libc_fstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat64(SB) - GLOBL ·libc_fstat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstat64_trampoline_addr(SB)/8, $libc_fstat64_trampoline<>(SB) TEXT libc_fstatat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat64(SB) - GLOBL ·libc_fstatat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatat64_trampoline_addr(SB)/8, $libc_fstatat64_trampoline<>(SB) TEXT libc_fstatfs64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs64(SB) - GLOBL ·libc_fstatfs64_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatfs64_trampoline_addr(SB)/8, $libc_fstatfs64_trampoline<>(SB) TEXT libc_getfsstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getfsstat64(SB) - GLOBL ·libc_getfsstat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_getfsstat64_trampoline_addr(SB)/8, $libc_getfsstat64_trampoline<>(SB) TEXT libc_lstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat64(SB) - GLOBL ·libc_lstat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_lstat64_trampoline_addr(SB)/8, $libc_lstat64_trampoline<>(SB) TEXT 
libc_ptrace_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) - GLOBL ·libc_ptrace_trampoline_addr(SB), RODATA, $8 DATA ·libc_ptrace_trampoline_addr(SB)/8, $libc_ptrace_trampoline<>(SB) TEXT libc_stat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat64(SB) - GLOBL ·libc_stat64_trampoline_addr(SB), RODATA, $8 DATA ·libc_stat64_trampoline_addr(SB)/8, $libc_stat64_trampoline<>(SB) TEXT libc_statfs64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs64(SB) - GLOBL ·libc_statfs64_trampoline_addr(SB), RODATA, $8 DATA ·libc_statfs64_trampoline_addr(SB)/8, $libc_statfs64_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 51d6f3fb2..b18edbd0e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -725,6 +725,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -733,10 +739,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2410,28 +2412,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { @@ -2521,14 +2501,6 @@ func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) { return } -func ptrace1Ptr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { - _, _, e1 := syscall_syscall6(libc_ptrace_trampoline_addr, uintptr(request), uintptr(pid), addr, uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - var libc_ptrace_trampoline_addr uintptr //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index c3b82c037..08362c1ab 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -5,703 +5,586 @@ TEXT libc_fdopendir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fdopendir(SB) - GLOBL ·libc_fdopendir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fdopendir_trampoline_addr(SB)/8, 
$libc_fdopendir_trampoline<>(SB) TEXT libc_getgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgroups(SB) - GLOBL ·libc_getgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgroups_trampoline_addr(SB)/8, $libc_getgroups_trampoline<>(SB) TEXT libc_setgroups_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgroups(SB) - GLOBL ·libc_setgroups_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgroups_trampoline_addr(SB)/8, $libc_setgroups_trampoline<>(SB) TEXT libc_wait4_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_wait4(SB) - GLOBL ·libc_wait4_trampoline_addr(SB), RODATA, $8 DATA ·libc_wait4_trampoline_addr(SB)/8, $libc_wait4_trampoline<>(SB) TEXT libc_accept_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_accept(SB) - GLOBL ·libc_accept_trampoline_addr(SB), RODATA, $8 DATA ·libc_accept_trampoline_addr(SB)/8, $libc_accept_trampoline<>(SB) TEXT libc_bind_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_bind(SB) - GLOBL ·libc_bind_trampoline_addr(SB), RODATA, $8 DATA ·libc_bind_trampoline_addr(SB)/8, $libc_bind_trampoline<>(SB) TEXT libc_connect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_connect(SB) - GLOBL ·libc_connect_trampoline_addr(SB), RODATA, $8 DATA ·libc_connect_trampoline_addr(SB)/8, $libc_connect_trampoline<>(SB) TEXT libc_socket_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socket(SB) - GLOBL ·libc_socket_trampoline_addr(SB), RODATA, $8 DATA ·libc_socket_trampoline_addr(SB)/8, $libc_socket_trampoline<>(SB) TEXT libc_getsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockopt(SB) - GLOBL ·libc_getsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockopt_trampoline_addr(SB)/8, $libc_getsockopt_trampoline<>(SB) TEXT libc_setsockopt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsockopt(SB) - GLOBL ·libc_setsockopt_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsockopt_trampoline_addr(SB)/8, $libc_setsockopt_trampoline<>(SB) TEXT libc_getpeername_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpeername(SB) - GLOBL ·libc_getpeername_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpeername_trampoline_addr(SB)/8, $libc_getpeername_trampoline<>(SB) TEXT libc_getsockname_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsockname(SB) - GLOBL ·libc_getsockname_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsockname_trampoline_addr(SB)/8, $libc_getsockname_trampoline<>(SB) TEXT libc_shutdown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shutdown(SB) - GLOBL ·libc_shutdown_trampoline_addr(SB), RODATA, $8 DATA ·libc_shutdown_trampoline_addr(SB)/8, $libc_shutdown_trampoline<>(SB) TEXT libc_socketpair_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_socketpair(SB) - GLOBL ·libc_socketpair_trampoline_addr(SB), RODATA, $8 DATA ·libc_socketpair_trampoline_addr(SB)/8, $libc_socketpair_trampoline<>(SB) TEXT libc_recvfrom_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvfrom(SB) - GLOBL ·libc_recvfrom_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvfrom_trampoline_addr(SB)/8, $libc_recvfrom_trampoline<>(SB) TEXT libc_sendto_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendto(SB) - GLOBL ·libc_sendto_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendto_trampoline_addr(SB)/8, $libc_sendto_trampoline<>(SB) TEXT libc_recvmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_recvmsg(SB) - GLOBL ·libc_recvmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_recvmsg_trampoline_addr(SB)/8, $libc_recvmsg_trampoline<>(SB) TEXT libc_sendmsg_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) - GLOBL ·libc_sendmsg_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendmsg_trampoline_addr(SB)/8, $libc_sendmsg_trampoline<>(SB) TEXT libc_kevent_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) - GLOBL ·libc_kevent_trampoline_addr(SB), 
RODATA, $8 DATA ·libc_kevent_trampoline_addr(SB)/8, $libc_kevent_trampoline<>(SB) TEXT libc_utimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) - GLOBL ·libc_utimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimes_trampoline_addr(SB)/8, $libc_utimes_trampoline<>(SB) TEXT libc_futimes_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_futimes(SB) - GLOBL ·libc_futimes_trampoline_addr(SB), RODATA, $8 DATA ·libc_futimes_trampoline_addr(SB)/8, $libc_futimes_trampoline<>(SB) TEXT libc_poll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_poll(SB) - GLOBL ·libc_poll_trampoline_addr(SB), RODATA, $8 DATA ·libc_poll_trampoline_addr(SB)/8, $libc_poll_trampoline<>(SB) TEXT libc_madvise_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_madvise(SB) - GLOBL ·libc_madvise_trampoline_addr(SB), RODATA, $8 DATA ·libc_madvise_trampoline_addr(SB)/8, $libc_madvise_trampoline<>(SB) TEXT libc_mlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlock(SB) - GLOBL ·libc_mlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlock_trampoline_addr(SB)/8, $libc_mlock_trampoline<>(SB) TEXT libc_mlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mlockall(SB) - GLOBL ·libc_mlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_mlockall_trampoline_addr(SB)/8, $libc_mlockall_trampoline<>(SB) TEXT libc_mprotect_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mprotect(SB) - GLOBL ·libc_mprotect_trampoline_addr(SB), RODATA, $8 DATA ·libc_mprotect_trampoline_addr(SB)/8, $libc_mprotect_trampoline<>(SB) TEXT libc_msync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_msync(SB) - GLOBL ·libc_msync_trampoline_addr(SB), RODATA, $8 DATA ·libc_msync_trampoline_addr(SB)/8, $libc_msync_trampoline<>(SB) TEXT libc_munlock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) - GLOBL ·libc_munlock_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlock_trampoline_addr(SB)/8, $libc_munlock_trampoline<>(SB) TEXT libc_munlockall_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) - GLOBL ·libc_munlockall_trampoline_addr(SB), RODATA, $8 DATA ·libc_munlockall_trampoline_addr(SB)/8, $libc_munlockall_trampoline<>(SB) TEXT libc_closedir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_closedir(SB) - GLOBL ·libc_closedir_trampoline_addr(SB), RODATA, $8 DATA ·libc_closedir_trampoline_addr(SB)/8, $libc_closedir_trampoline<>(SB) TEXT libc_readdir_r_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readdir_r(SB) - GLOBL ·libc_readdir_r_trampoline_addr(SB), RODATA, $8 DATA ·libc_readdir_r_trampoline_addr(SB)/8, $libc_readdir_r_trampoline<>(SB) TEXT libc_pipe_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pipe(SB) - GLOBL ·libc_pipe_trampoline_addr(SB), RODATA, $8 DATA ·libc_pipe_trampoline_addr(SB)/8, $libc_pipe_trampoline<>(SB) TEXT libc_getxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getxattr(SB) - GLOBL ·libc_getxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_getxattr_trampoline_addr(SB)/8, $libc_getxattr_trampoline<>(SB) TEXT libc_fgetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fgetxattr(SB) - GLOBL ·libc_fgetxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fgetxattr_trampoline_addr(SB)/8, $libc_fgetxattr_trampoline<>(SB) TEXT libc_setxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setxattr(SB) - GLOBL ·libc_setxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_setxattr_trampoline_addr(SB)/8, $libc_setxattr_trampoline<>(SB) TEXT libc_fsetxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsetxattr(SB) - GLOBL ·libc_fsetxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsetxattr_trampoline_addr(SB)/8, $libc_fsetxattr_trampoline<>(SB) TEXT libc_removexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_removexattr(SB) - GLOBL ·libc_removexattr_trampoline_addr(SB), 
RODATA, $8 DATA ·libc_removexattr_trampoline_addr(SB)/8, $libc_removexattr_trampoline<>(SB) TEXT libc_fremovexattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fremovexattr(SB) - GLOBL ·libc_fremovexattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_fremovexattr_trampoline_addr(SB)/8, $libc_fremovexattr_trampoline<>(SB) TEXT libc_listxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listxattr(SB) - GLOBL ·libc_listxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_listxattr_trampoline_addr(SB)/8, $libc_listxattr_trampoline<>(SB) TEXT libc_flistxattr_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flistxattr(SB) - GLOBL ·libc_flistxattr_trampoline_addr(SB), RODATA, $8 DATA ·libc_flistxattr_trampoline_addr(SB)/8, $libc_flistxattr_trampoline<>(SB) TEXT libc_utimensat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_utimensat(SB) - GLOBL ·libc_utimensat_trampoline_addr(SB), RODATA, $8 DATA ·libc_utimensat_trampoline_addr(SB)/8, $libc_utimensat_trampoline<>(SB) TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fcntl(SB) - GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) TEXT libc_kill_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kill(SB) - GLOBL ·libc_kill_trampoline_addr(SB), RODATA, $8 DATA ·libc_kill_trampoline_addr(SB)/8, $libc_kill_trampoline<>(SB) TEXT libc_ioctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ioctl(SB) - GLOBL ·libc_ioctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_ioctl_trampoline_addr(SB)/8, $libc_ioctl_trampoline<>(SB) TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sysctl(SB) - GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) - GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 DATA ·libc_sendfile_trampoline_addr(SB)/8, $libc_sendfile_trampoline<>(SB) TEXT libc_shmat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmat(SB) - GLOBL ·libc_shmat_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmat_trampoline_addr(SB)/8, $libc_shmat_trampoline<>(SB) TEXT libc_shmctl_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmctl(SB) - GLOBL ·libc_shmctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmctl_trampoline_addr(SB)/8, $libc_shmctl_trampoline<>(SB) TEXT libc_shmdt_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmdt(SB) - GLOBL ·libc_shmdt_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmdt_trampoline_addr(SB)/8, $libc_shmdt_trampoline<>(SB) TEXT libc_shmget_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_shmget(SB) - GLOBL ·libc_shmget_trampoline_addr(SB), RODATA, $8 DATA ·libc_shmget_trampoline_addr(SB)/8, $libc_shmget_trampoline<>(SB) TEXT libc_access_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_access(SB) - GLOBL ·libc_access_trampoline_addr(SB), RODATA, $8 DATA ·libc_access_trampoline_addr(SB)/8, $libc_access_trampoline<>(SB) TEXT libc_adjtime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_adjtime(SB) - GLOBL ·libc_adjtime_trampoline_addr(SB), RODATA, $8 DATA ·libc_adjtime_trampoline_addr(SB)/8, $libc_adjtime_trampoline<>(SB) TEXT libc_chdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chdir(SB) - GLOBL ·libc_chdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_chdir_trampoline_addr(SB)/8, $libc_chdir_trampoline<>(SB) TEXT libc_chflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chflags(SB) - GLOBL ·libc_chflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_chflags_trampoline_addr(SB)/8, $libc_chflags_trampoline<>(SB) TEXT libc_chmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chmod(SB) - GLOBL ·libc_chmod_trampoline_addr(SB), RODATA, $8 DATA 
·libc_chmod_trampoline_addr(SB)/8, $libc_chmod_trampoline<>(SB) TEXT libc_chown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chown(SB) - GLOBL ·libc_chown_trampoline_addr(SB), RODATA, $8 DATA ·libc_chown_trampoline_addr(SB)/8, $libc_chown_trampoline<>(SB) TEXT libc_chroot_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) - GLOBL ·libc_chroot_trampoline_addr(SB), RODATA, $8 DATA ·libc_chroot_trampoline_addr(SB)/8, $libc_chroot_trampoline<>(SB) TEXT libc_clock_gettime_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clock_gettime(SB) - GLOBL ·libc_clock_gettime_trampoline_addr(SB), RODATA, $8 DATA ·libc_clock_gettime_trampoline_addr(SB)/8, $libc_clock_gettime_trampoline<>(SB) TEXT libc_close_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_close(SB) - GLOBL ·libc_close_trampoline_addr(SB), RODATA, $8 DATA ·libc_close_trampoline_addr(SB)/8, $libc_close_trampoline<>(SB) TEXT libc_clonefile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefile(SB) - GLOBL ·libc_clonefile_trampoline_addr(SB), RODATA, $8 DATA ·libc_clonefile_trampoline_addr(SB)/8, $libc_clonefile_trampoline<>(SB) TEXT libc_clonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_clonefileat(SB) - GLOBL ·libc_clonefileat_trampoline_addr(SB), RODATA, $8 DATA ·libc_clonefileat_trampoline_addr(SB)/8, $libc_clonefileat_trampoline<>(SB) TEXT libc_dup_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup(SB) - GLOBL ·libc_dup_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup_trampoline_addr(SB)/8, $libc_dup_trampoline<>(SB) TEXT libc_dup2_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_dup2(SB) - GLOBL ·libc_dup2_trampoline_addr(SB), RODATA, $8 DATA ·libc_dup2_trampoline_addr(SB)/8, $libc_dup2_trampoline<>(SB) TEXT libc_exchangedata_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exchangedata(SB) - GLOBL ·libc_exchangedata_trampoline_addr(SB), RODATA, $8 DATA ·libc_exchangedata_trampoline_addr(SB)/8, $libc_exchangedata_trampoline<>(SB) TEXT libc_exit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_exit(SB) - GLOBL ·libc_exit_trampoline_addr(SB), RODATA, $8 DATA ·libc_exit_trampoline_addr(SB)/8, $libc_exit_trampoline<>(SB) TEXT libc_faccessat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_faccessat(SB) - GLOBL ·libc_faccessat_trampoline_addr(SB), RODATA, $8 DATA ·libc_faccessat_trampoline_addr(SB)/8, $libc_faccessat_trampoline<>(SB) TEXT libc_fchdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchdir(SB) - GLOBL ·libc_fchdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchdir_trampoline_addr(SB)/8, $libc_fchdir_trampoline<>(SB) TEXT libc_fchflags_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchflags(SB) - GLOBL ·libc_fchflags_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchflags_trampoline_addr(SB)/8, $libc_fchflags_trampoline<>(SB) TEXT libc_fchmod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmod(SB) - GLOBL ·libc_fchmod_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmod_trampoline_addr(SB)/8, $libc_fchmod_trampoline<>(SB) TEXT libc_fchmodat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchmodat(SB) - GLOBL ·libc_fchmodat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchmodat_trampoline_addr(SB)/8, $libc_fchmodat_trampoline<>(SB) TEXT libc_fchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchown(SB) - GLOBL ·libc_fchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchown_trampoline_addr(SB)/8, $libc_fchown_trampoline<>(SB) TEXT libc_fchownat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fchownat(SB) - GLOBL ·libc_fchownat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fchownat_trampoline_addr(SB)/8, $libc_fchownat_trampoline<>(SB) TEXT libc_fclonefileat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fclonefileat(SB) - GLOBL ·libc_fclonefileat_trampoline_addr(SB), RODATA, $8 
DATA ·libc_fclonefileat_trampoline_addr(SB)/8, $libc_fclonefileat_trampoline<>(SB) TEXT libc_flock_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_flock(SB) - GLOBL ·libc_flock_trampoline_addr(SB), RODATA, $8 DATA ·libc_flock_trampoline_addr(SB)/8, $libc_flock_trampoline<>(SB) TEXT libc_fpathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fpathconf(SB) - GLOBL ·libc_fpathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_fpathconf_trampoline_addr(SB)/8, $libc_fpathconf_trampoline<>(SB) TEXT libc_fsync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fsync(SB) - GLOBL ·libc_fsync_trampoline_addr(SB), RODATA, $8 DATA ·libc_fsync_trampoline_addr(SB)/8, $libc_fsync_trampoline<>(SB) TEXT libc_ftruncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ftruncate(SB) - GLOBL ·libc_ftruncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_ftruncate_trampoline_addr(SB)/8, $libc_ftruncate_trampoline<>(SB) TEXT libc_getcwd_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getcwd(SB) - GLOBL ·libc_getcwd_trampoline_addr(SB), RODATA, $8 DATA ·libc_getcwd_trampoline_addr(SB)/8, $libc_getcwd_trampoline<>(SB) TEXT libc_getdtablesize_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getdtablesize(SB) - GLOBL ·libc_getdtablesize_trampoline_addr(SB), RODATA, $8 DATA ·libc_getdtablesize_trampoline_addr(SB)/8, $libc_getdtablesize_trampoline<>(SB) TEXT libc_getegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getegid(SB) - GLOBL ·libc_getegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getegid_trampoline_addr(SB)/8, $libc_getegid_trampoline<>(SB) TEXT libc_geteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_geteuid(SB) - GLOBL ·libc_geteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_geteuid_trampoline_addr(SB)/8, $libc_geteuid_trampoline<>(SB) TEXT libc_getgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getgid(SB) - GLOBL ·libc_getgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getgid_trampoline_addr(SB)/8, $libc_getgid_trampoline<>(SB) TEXT libc_getpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgid(SB) - GLOBL ·libc_getpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgid_trampoline_addr(SB)/8, $libc_getpgid_trampoline<>(SB) TEXT libc_getpgrp_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpgrp(SB) - GLOBL ·libc_getpgrp_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpgrp_trampoline_addr(SB)/8, $libc_getpgrp_trampoline<>(SB) TEXT libc_getpid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpid(SB) - GLOBL ·libc_getpid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpid_trampoline_addr(SB)/8, $libc_getpid_trampoline<>(SB) TEXT libc_getppid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getppid(SB) - GLOBL ·libc_getppid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getppid_trampoline_addr(SB)/8, $libc_getppid_trampoline<>(SB) TEXT libc_getpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getpriority(SB) - GLOBL ·libc_getpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_getpriority_trampoline_addr(SB)/8, $libc_getpriority_trampoline<>(SB) TEXT libc_getrlimit_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrlimit(SB) - GLOBL ·libc_getrlimit_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrlimit_trampoline_addr(SB)/8, $libc_getrlimit_trampoline<>(SB) TEXT libc_getrusage_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getrusage(SB) - GLOBL ·libc_getrusage_trampoline_addr(SB), RODATA, $8 DATA ·libc_getrusage_trampoline_addr(SB)/8, $libc_getrusage_trampoline<>(SB) TEXT libc_getsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getsid(SB) - GLOBL ·libc_getsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getsid_trampoline_addr(SB)/8, $libc_getsid_trampoline<>(SB) TEXT libc_gettimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) - GLOBL 
·libc_gettimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_gettimeofday_trampoline_addr(SB)/8, $libc_gettimeofday_trampoline<>(SB) TEXT libc_getuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getuid(SB) - GLOBL ·libc_getuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_getuid_trampoline_addr(SB)/8, $libc_getuid_trampoline<>(SB) TEXT libc_issetugid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_issetugid(SB) - GLOBL ·libc_issetugid_trampoline_addr(SB), RODATA, $8 DATA ·libc_issetugid_trampoline_addr(SB)/8, $libc_issetugid_trampoline<>(SB) TEXT libc_kqueue_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_kqueue(SB) - GLOBL ·libc_kqueue_trampoline_addr(SB), RODATA, $8 DATA ·libc_kqueue_trampoline_addr(SB)/8, $libc_kqueue_trampoline<>(SB) TEXT libc_lchown_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lchown(SB) - GLOBL ·libc_lchown_trampoline_addr(SB), RODATA, $8 DATA ·libc_lchown_trampoline_addr(SB)/8, $libc_lchown_trampoline<>(SB) TEXT libc_link_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_link(SB) - GLOBL ·libc_link_trampoline_addr(SB), RODATA, $8 DATA ·libc_link_trampoline_addr(SB)/8, $libc_link_trampoline<>(SB) TEXT libc_linkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_linkat(SB) - GLOBL ·libc_linkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_linkat_trampoline_addr(SB)/8, $libc_linkat_trampoline<>(SB) TEXT libc_listen_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_listen(SB) - GLOBL ·libc_listen_trampoline_addr(SB), RODATA, $8 DATA ·libc_listen_trampoline_addr(SB)/8, $libc_listen_trampoline<>(SB) TEXT libc_mkdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdir(SB) - GLOBL ·libc_mkdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdir_trampoline_addr(SB)/8, $libc_mkdir_trampoline<>(SB) TEXT libc_mkdirat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkdirat(SB) - GLOBL ·libc_mkdirat_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkdirat_trampoline_addr(SB)/8, $libc_mkdirat_trampoline<>(SB) TEXT libc_mkfifo_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mkfifo(SB) - GLOBL ·libc_mkfifo_trampoline_addr(SB), RODATA, $8 DATA ·libc_mkfifo_trampoline_addr(SB)/8, $libc_mkfifo_trampoline<>(SB) TEXT libc_mknod_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mknod(SB) - GLOBL ·libc_mknod_trampoline_addr(SB), RODATA, $8 DATA ·libc_mknod_trampoline_addr(SB)/8, $libc_mknod_trampoline<>(SB) TEXT libc_mount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mount(SB) - GLOBL ·libc_mount_trampoline_addr(SB), RODATA, $8 DATA ·libc_mount_trampoline_addr(SB)/8, $libc_mount_trampoline<>(SB) TEXT libc_open_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_open(SB) - GLOBL ·libc_open_trampoline_addr(SB), RODATA, $8 DATA ·libc_open_trampoline_addr(SB)/8, $libc_open_trampoline<>(SB) TEXT libc_openat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_openat(SB) - GLOBL ·libc_openat_trampoline_addr(SB), RODATA, $8 DATA ·libc_openat_trampoline_addr(SB)/8, $libc_openat_trampoline<>(SB) TEXT libc_pathconf_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pathconf(SB) - GLOBL ·libc_pathconf_trampoline_addr(SB), RODATA, $8 DATA ·libc_pathconf_trampoline_addr(SB)/8, $libc_pathconf_trampoline<>(SB) TEXT libc_pread_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pread(SB) - GLOBL ·libc_pread_trampoline_addr(SB), RODATA, $8 DATA ·libc_pread_trampoline_addr(SB)/8, $libc_pread_trampoline<>(SB) TEXT libc_pwrite_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_pwrite(SB) - GLOBL ·libc_pwrite_trampoline_addr(SB), RODATA, $8 DATA ·libc_pwrite_trampoline_addr(SB)/8, $libc_pwrite_trampoline<>(SB) TEXT libc_read_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_read(SB) - GLOBL ·libc_read_trampoline_addr(SB), RODATA, $8 DATA ·libc_read_trampoline_addr(SB)/8, $libc_read_trampoline<>(SB) 
TEXT libc_readlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlink(SB) - GLOBL ·libc_readlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlink_trampoline_addr(SB)/8, $libc_readlink_trampoline<>(SB) TEXT libc_readlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_readlinkat(SB) - GLOBL ·libc_readlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_readlinkat_trampoline_addr(SB)/8, $libc_readlinkat_trampoline<>(SB) TEXT libc_rename_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rename(SB) - GLOBL ·libc_rename_trampoline_addr(SB), RODATA, $8 DATA ·libc_rename_trampoline_addr(SB)/8, $libc_rename_trampoline<>(SB) TEXT libc_renameat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_renameat(SB) - GLOBL ·libc_renameat_trampoline_addr(SB), RODATA, $8 DATA ·libc_renameat_trampoline_addr(SB)/8, $libc_renameat_trampoline<>(SB) TEXT libc_revoke_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_revoke(SB) - GLOBL ·libc_revoke_trampoline_addr(SB), RODATA, $8 DATA ·libc_revoke_trampoline_addr(SB)/8, $libc_revoke_trampoline<>(SB) TEXT libc_rmdir_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_rmdir(SB) - GLOBL ·libc_rmdir_trampoline_addr(SB), RODATA, $8 DATA ·libc_rmdir_trampoline_addr(SB)/8, $libc_rmdir_trampoline<>(SB) TEXT libc_lseek_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lseek(SB) - GLOBL ·libc_lseek_trampoline_addr(SB), RODATA, $8 DATA ·libc_lseek_trampoline_addr(SB)/8, $libc_lseek_trampoline<>(SB) TEXT libc_select_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_select(SB) - GLOBL ·libc_select_trampoline_addr(SB), RODATA, $8 DATA ·libc_select_trampoline_addr(SB)/8, $libc_select_trampoline<>(SB) @@ -712,192 +595,160 @@ DATA ·libc_setattrlist_trampoline_addr(SB)/8, $libc_setattrlist_trampoline<>(SB TEXT libc_setegid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setegid(SB) - GLOBL ·libc_setegid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setegid_trampoline_addr(SB)/8, $libc_setegid_trampoline<>(SB) TEXT libc_seteuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_seteuid(SB) - GLOBL ·libc_seteuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_seteuid_trampoline_addr(SB)/8, $libc_seteuid_trampoline<>(SB) TEXT libc_setgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setgid(SB) - GLOBL ·libc_setgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setgid_trampoline_addr(SB)/8, $libc_setgid_trampoline<>(SB) TEXT libc_setlogin_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setlogin(SB) - GLOBL ·libc_setlogin_trampoline_addr(SB), RODATA, $8 DATA ·libc_setlogin_trampoline_addr(SB)/8, $libc_setlogin_trampoline<>(SB) TEXT libc_setpgid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpgid(SB) - GLOBL ·libc_setpgid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpgid_trampoline_addr(SB)/8, $libc_setpgid_trampoline<>(SB) TEXT libc_setpriority_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setpriority(SB) - GLOBL ·libc_setpriority_trampoline_addr(SB), RODATA, $8 DATA ·libc_setpriority_trampoline_addr(SB)/8, $libc_setpriority_trampoline<>(SB) TEXT libc_setprivexec_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setprivexec(SB) - GLOBL ·libc_setprivexec_trampoline_addr(SB), RODATA, $8 DATA ·libc_setprivexec_trampoline_addr(SB)/8, $libc_setprivexec_trampoline<>(SB) TEXT libc_setregid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setregid(SB) - GLOBL ·libc_setregid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setregid_trampoline_addr(SB)/8, $libc_setregid_trampoline<>(SB) TEXT libc_setreuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setreuid(SB) - GLOBL ·libc_setreuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setreuid_trampoline_addr(SB)/8, $libc_setreuid_trampoline<>(SB) TEXT libc_setsid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setsid(SB) - GLOBL 
·libc_setsid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setsid_trampoline_addr(SB)/8, $libc_setsid_trampoline<>(SB) TEXT libc_settimeofday_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_settimeofday(SB) - GLOBL ·libc_settimeofday_trampoline_addr(SB), RODATA, $8 DATA ·libc_settimeofday_trampoline_addr(SB)/8, $libc_settimeofday_trampoline<>(SB) TEXT libc_setuid_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_setuid(SB) - GLOBL ·libc_setuid_trampoline_addr(SB), RODATA, $8 DATA ·libc_setuid_trampoline_addr(SB)/8, $libc_setuid_trampoline<>(SB) TEXT libc_symlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlink(SB) - GLOBL ·libc_symlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlink_trampoline_addr(SB)/8, $libc_symlink_trampoline<>(SB) TEXT libc_symlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_symlinkat(SB) - GLOBL ·libc_symlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_symlinkat_trampoline_addr(SB)/8, $libc_symlinkat_trampoline<>(SB) TEXT libc_sync_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sync(SB) - GLOBL ·libc_sync_trampoline_addr(SB), RODATA, $8 DATA ·libc_sync_trampoline_addr(SB)/8, $libc_sync_trampoline<>(SB) TEXT libc_truncate_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_truncate(SB) - GLOBL ·libc_truncate_trampoline_addr(SB), RODATA, $8 DATA ·libc_truncate_trampoline_addr(SB)/8, $libc_truncate_trampoline<>(SB) TEXT libc_umask_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_umask(SB) - GLOBL ·libc_umask_trampoline_addr(SB), RODATA, $8 DATA ·libc_umask_trampoline_addr(SB)/8, $libc_umask_trampoline<>(SB) TEXT libc_undelete_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_undelete(SB) - GLOBL ·libc_undelete_trampoline_addr(SB), RODATA, $8 DATA ·libc_undelete_trampoline_addr(SB)/8, $libc_undelete_trampoline<>(SB) TEXT libc_unlink_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlink(SB) - GLOBL ·libc_unlink_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlink_trampoline_addr(SB)/8, $libc_unlink_trampoline<>(SB) TEXT libc_unlinkat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unlinkat(SB) - GLOBL ·libc_unlinkat_trampoline_addr(SB), RODATA, $8 DATA ·libc_unlinkat_trampoline_addr(SB)/8, $libc_unlinkat_trampoline<>(SB) TEXT libc_unmount_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_unmount(SB) - GLOBL ·libc_unmount_trampoline_addr(SB), RODATA, $8 DATA ·libc_unmount_trampoline_addr(SB)/8, $libc_unmount_trampoline<>(SB) TEXT libc_write_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_write(SB) - GLOBL ·libc_write_trampoline_addr(SB), RODATA, $8 DATA ·libc_write_trampoline_addr(SB)/8, $libc_write_trampoline<>(SB) TEXT libc_mmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) - GLOBL ·libc_mmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_mmap_trampoline_addr(SB)/8, $libc_mmap_trampoline<>(SB) TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) - GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) - GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstat_trampoline_addr(SB)/8, $libc_fstat_trampoline<>(SB) TEXT libc_fstatat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatat(SB) - GLOBL ·libc_fstatat_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatat_trampoline_addr(SB)/8, $libc_fstatat_trampoline<>(SB) TEXT libc_fstatfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstatfs(SB) - GLOBL ·libc_fstatfs_trampoline_addr(SB), RODATA, $8 DATA ·libc_fstatfs_trampoline_addr(SB)/8, $libc_fstatfs_trampoline<>(SB) TEXT libc_getfsstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_getfsstat(SB) - GLOBL ·libc_getfsstat_trampoline_addr(SB), RODATA, $8 
DATA ·libc_getfsstat_trampoline_addr(SB)/8, $libc_getfsstat_trampoline<>(SB) TEXT libc_lstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_lstat(SB) - GLOBL ·libc_lstat_trampoline_addr(SB), RODATA, $8 DATA ·libc_lstat_trampoline_addr(SB)/8, $libc_lstat_trampoline<>(SB) TEXT libc_ptrace_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) - GLOBL ·libc_ptrace_trampoline_addr(SB), RODATA, $8 DATA ·libc_ptrace_trampoline_addr(SB)/8, $libc_ptrace_trampoline<>(SB) TEXT libc_stat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_stat(SB) - GLOBL ·libc_stat_trampoline_addr(SB), RODATA, $8 DATA ·libc_stat_trampoline_addr(SB)/8, $libc_stat_trampoline<>(SB) TEXT libc_statfs_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_statfs(SB) - GLOBL ·libc_statfs_trampoline_addr(SB), RODATA, $8 DATA ·libc_statfs_trampoline_addr(SB)/8, $libc_statfs_trampoline<>(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index 0eabac7ad..0c67df64a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -1642,28 +1642,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index ee313eb00..e6e05d145 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -1862,28 +1862,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 4c986e448..7508accac 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ 
b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -1862,28 +1862,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 555216944..7b56aead4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -1862,28 +1862,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index 67a226fbf..cc623dcaa 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -1862,28 +1862,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go index f0b9ddaaa..581849197 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -1862,28 +1862,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func accept4(fd int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (nfd int, err error) { r0, _, e1 := Syscall6(SYS_ACCEPT4, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) nfd = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go index b57c7050d..6be25cd19 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_illumos_amd64.go @@ -40,7 +40,7 @@ func readv(fd int, iovs []Iovec) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procreadv)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -55,7 +55,7 @@ func preadv(fd int, iovs []Iovec, off int64) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpreadv)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), uintptr(off), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -70,7 +70,7 @@ func writev(fd int, iovs []Iovec) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwritev)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -85,7 +85,7 @@ func pwritev(fd int, iovs []Iovec, off int64) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpwritev)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(iovs)), uintptr(off), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -96,7 +96,7 @@ func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procaccept4)), 4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index a07321bed..1ff3aec74 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -1734,28 +1734,6 @@ func exitThread(code int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) 
- if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, p *byte, np int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func readv(fd int, iovs []Iovec) (n int, err error) { var _p0 unsafe.Pointer if len(iovs) > 0 { @@ -2197,3 +2175,23 @@ func getresgid(rgid *_C_int, egid *_C_int, sgid *_C_int) { RawSyscallNoError(SYS_GETRESGID, uintptr(unsafe.Pointer(rgid)), uintptr(unsafe.Pointer(egid)), uintptr(unsafe.Pointer(sgid))) return } + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func schedSetattr(pid int, attr *SchedAttr, flags uint) (err error) { + _, _, e1 := Syscall(SYS_SCHED_SETATTR, uintptr(pid), uintptr(unsafe.Pointer(attr)), uintptr(flags)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func schedGetattr(pid int, attr *SchedAttr, size uint, flags uint) (err error) { + _, _, e1 := Syscall6(SYS_SCHED_GETATTR, uintptr(pid), uintptr(unsafe.Pointer(attr)), uintptr(size), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 35f499b32..2df3c5bac 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -1824,28 +1824,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 3cda65b0d..a60556bab 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -1824,28 +1824,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err 
error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 1e1fea902..9f788917a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -1824,28 +1824,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 3b77da110..82a4cb2dc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -1824,28 +1824,6 @@ func munmap(addr uintptr, length uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 9ab9abf72..66b3b6456 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -549,6 +549,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -557,10 +563,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2211,28 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n 
int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 915761eab..c5c4cc112 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -2213,28 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 8e87fdf15..93bfbb328 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -549,6 +549,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -557,10 +563,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2211,28 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), 
uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 12a7a2160..a107b8fda 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -549,6 +549,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -557,10 +563,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2211,28 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index b19e8aa03..c427de509 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -549,6 +549,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -557,10 +563,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2211,28 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := 
syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index fb99594c9..60c1a99ae 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -549,6 +549,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -557,10 +563,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2211,28 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 32cbbbc52..52eba360f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -549,6 +549,12 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +var libc_ioctl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { @@ -557,10 +563,6 @@ func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { return } -var libc_ioctl_trampoline_addr uintptr - -//go:cgo_import_dynamic libc_ioctl ioctl "libc.so" - // 
THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { @@ -2211,28 +2213,6 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_read_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - -func writelen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(libc_write_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 609d1c598..b40189464 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -436,7 +436,7 @@ func pipe(p *[2]_C_int) (n int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procpipe)), 1, uintptr(unsafe.Pointer(p)), 0, 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -446,7 +446,7 @@ func pipe(p *[2]_C_int) (n int, err error) { func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procpipe2)), 2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -456,7 +456,7 @@ func pipe2(p *[2]_C_int, flags int) (err error) { func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetsockname)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -471,7 +471,7 @@ func Getcwd(buf []byte) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetcwd)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -482,7 +482,7 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procgetgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -492,7 +492,7 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { func setgroups(ngid int, gid *_Gid_t) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procsetgroups)), 2, uintptr(ngid), uintptr(unsafe.Pointer(gid)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -503,7 +503,7 @@ func wait4(pid int32, statusp *_C_int, options int, rusage *Rusage) (wpid int32, r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwait4)), 4, uintptr(pid), uintptr(unsafe.Pointer(statusp)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0) wpid = int32(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -518,7 +518,7 @@ func gethostname(buf []byte) (n int, err error) { r0, _, e1 := 
sysvicall6(uintptr(unsafe.Pointer(&procgethostname)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -533,7 +533,7 @@ func utimes(path string, times *[2]Timeval) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procutimes)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -548,7 +548,7 @@ func utimensat(fd int, path string, times *[2]Timespec, flag int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procutimensat)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), uintptr(flag), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -559,7 +559,7 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0) val = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -569,7 +569,7 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { func futimesat(fildes int, path *byte, times *[2]Timeval) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfutimesat)), 3, uintptr(fildes), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times)), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -580,7 +580,7 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procaccept)), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -591,7 +591,7 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_recvmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -602,7 +602,7 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_sendmsg)), 3, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -612,7 +612,7 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { func acct(path *byte) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procacct)), 1, uintptr(unsafe.Pointer(path)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -647,7 +647,7 @@ func ioctlRet(fd int, req int, arg uintptr) (ret int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) ret = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -658,7 +658,7 @@ func ioctlPtrRet(fd int, req int, arg unsafe.Pointer) (ret int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) ret = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -669,7 +669,7 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpoll)), 3, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -684,7 +684,7 @@ func Access(path string, mode uint32) (err error) { } _, _, e1 := 
sysvicall6(uintptr(unsafe.Pointer(&procAccess)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -694,7 +694,7 @@ func Access(path string, mode uint32) (err error) { func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procAdjtime)), 2, uintptr(unsafe.Pointer(delta)), uintptr(unsafe.Pointer(olddelta)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -709,7 +709,7 @@ func Chdir(path string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChdir)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -724,7 +724,7 @@ func Chmod(path string, mode uint32) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChmod)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -739,7 +739,7 @@ func Chown(path string, uid int, gid int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChown)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -754,7 +754,7 @@ func Chroot(path string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procChroot)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -764,7 +764,7 @@ func Chroot(path string) (err error) { func ClockGettime(clockid int32, time *Timespec) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClockGettime)), 2, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -774,7 +774,7 @@ func ClockGettime(clockid int32, time *Timespec) (err error) { func Close(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procClose)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -790,7 +790,7 @@ func Creat(path string, mode uint32) (fd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procCreat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -801,7 +801,7 @@ func Dup(fd int) (nfd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup)), 1, uintptr(fd), 0, 0, 0, 0, 0) nfd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -811,7 +811,7 @@ func Dup(fd int) (nfd int, err error) { func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procDup2)), 2, uintptr(oldfd), uintptr(newfd), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -833,7 +833,7 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFaccessat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -843,7 +843,7 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { func Fchdir(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchdir)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -853,7 +853,7 @@ func Fchdir(fd int) (err error) { func Fchmod(fd int, mode uint32) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchmod)), 2, uintptr(fd), 
uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -868,7 +868,7 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchmodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -878,7 +878,7 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchown)), 3, uintptr(fd), uintptr(uid), uintptr(gid), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -893,7 +893,7 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFchownat)), 5, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -903,7 +903,7 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { func Fdatasync(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFdatasync)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -913,7 +913,7 @@ func Fdatasync(fd int) (err error) { func Flock(fd int, how int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFlock)), 2, uintptr(fd), uintptr(how), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -924,7 +924,7 @@ func Fpathconf(fd int, name int) (val int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFpathconf)), 2, uintptr(fd), uintptr(name), 0, 0, 0, 0) val = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -934,7 +934,7 @@ func Fpathconf(fd int, name int) (val int, err error) { func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstat)), 2, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -949,7 +949,7 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstatat)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -959,7 +959,7 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { func Fstatvfs(fd int, vfsstat *Statvfs_t) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstatvfs)), 2, uintptr(fd), uintptr(unsafe.Pointer(vfsstat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -974,7 +974,7 @@ func Getdents(fd int, buf []byte, basep *uintptr) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetdents)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1001,7 +1001,7 @@ func Getpgid(pid int) (pgid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgid)), 1, uintptr(pid), 0, 0, 0, 0, 0) pgid = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1012,7 +1012,7 @@ func Getpgrp() (pgid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetpgrp)), 0, 0, 0, 0, 0, 0, 0) pgid = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1047,7 
+1047,7 @@ func Getpriority(which int, who int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procGetpriority)), 2, uintptr(which), uintptr(who), 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1057,7 +1057,7 @@ func Getpriority(which int, who int) (n int, err error) { func Getrlimit(which int, lim *Rlimit) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetrlimit)), 2, uintptr(which), uintptr(unsafe.Pointer(lim)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1067,7 +1067,7 @@ func Getrlimit(which int, lim *Rlimit) (err error) { func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetrusage)), 2, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1078,7 +1078,7 @@ func Getsid(pid int) (sid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGetsid)), 1, uintptr(pid), 0, 0, 0, 0, 0) sid = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1088,7 +1088,7 @@ func Getsid(pid int) (sid int, err error) { func Gettimeofday(tv *Timeval) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procGettimeofday)), 1, uintptr(unsafe.Pointer(tv)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1106,7 +1106,7 @@ func Getuid() (uid int) { func Kill(pid int, signum syscall.Signal) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procKill)), 2, uintptr(pid), uintptr(signum), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1121,7 +1121,7 @@ func Lchown(path string, uid int, gid int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLchown)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1141,7 +1141,7 @@ func Link(path string, link string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLink)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1151,7 +1151,7 @@ func Link(path string, link string) (err error) { func Listen(s int, backlog int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1166,7 +1166,7 @@ func Lstat(path string, stat *Stat_t) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procLstat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1180,7 +1180,7 @@ func Madvise(b []byte, advice int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMadvise)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(advice), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1195,7 +1195,7 @@ func Mkdir(path string, mode uint32) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkdir)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1210,7 +1210,7 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkdirat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ 
-1225,7 +1225,7 @@ func Mkfifo(path string, mode uint32) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkfifo)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1240,7 +1240,7 @@ func Mkfifoat(dirfd int, path string, mode uint32) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMkfifoat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1255,7 +1255,7 @@ func Mknod(path string, mode uint32, dev int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMknod)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1270,7 +1270,7 @@ func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMknodat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1284,7 +1284,7 @@ func Mlock(b []byte) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMlock)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1294,7 +1294,7 @@ func Mlock(b []byte) (err error) { func Mlockall(flags int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMlockall)), 1, uintptr(flags), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1308,7 +1308,7 @@ func Mprotect(b []byte, prot int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMprotect)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(prot), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1322,7 +1322,7 @@ func Msync(b []byte, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMsync)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(flags), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1336,7 +1336,7 @@ func Munlock(b []byte) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMunlock)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1346,7 +1346,7 @@ func Munlock(b []byte) (err error) { func Munlockall() (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMunlockall)), 0, 0, 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1356,7 +1356,7 @@ func Munlockall() (err error) { func Nanosleep(time *Timespec, leftover *Timespec) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procNanosleep)), 2, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1372,7 +1372,7 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procOpen)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm), 0, 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1388,7 +1388,7 @@ func Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procOpenat)), 4, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1404,7 +1404,7 @@ func 
Pathconf(path string, name int) (val int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPathconf)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(name), 0, 0, 0, 0) val = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1414,7 +1414,7 @@ func Pathconf(path string, name int) (val int, err error) { func Pause() (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procPause)), 0, 0, 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1429,7 +1429,7 @@ func pread(fd int, p []byte, offset int64) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpread)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1444,7 +1444,7 @@ func pwrite(fd int, p []byte, offset int64) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpwrite)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(offset), 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1459,7 +1459,7 @@ func read(fd int, p []byte) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procread)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1479,7 +1479,7 @@ func Readlink(path string, buf []byte) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procReadlink)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(len(buf)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1499,7 +1499,7 @@ func Rename(from string, to string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRename)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1519,7 +1519,7 @@ func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err e } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRenameat)), 4, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1534,7 +1534,7 @@ func Rmdir(path string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procRmdir)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1545,7 +1545,7 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proclseek)), 3, uintptr(fd), uintptr(offset), uintptr(whence), 0, 0, 0) newoffset = int64(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1556,7 +1556,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSelect)), 5, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1566,7 +1566,7 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err func Setegid(egid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetegid)), 1, uintptr(egid), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1576,7 +1576,7 @@ func Setegid(egid int) (err error) { func Seteuid(euid int) (err error) { 
_, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSeteuid)), 1, uintptr(euid), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1586,7 +1586,7 @@ func Seteuid(euid int) (err error) { func Setgid(gid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetgid)), 1, uintptr(gid), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1600,7 +1600,7 @@ func Sethostname(p []byte) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSethostname)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1610,7 +1610,7 @@ func Sethostname(p []byte) (err error) { func Setpgid(pid int, pgid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetpgid)), 2, uintptr(pid), uintptr(pgid), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1620,7 +1620,7 @@ func Setpgid(pid int, pgid int) (err error) { func Setpriority(which int, who int, prio int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSetpriority)), 3, uintptr(which), uintptr(who), uintptr(prio), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1630,7 +1630,7 @@ func Setpriority(which int, who int, prio int) (err error) { func Setregid(rgid int, egid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetregid)), 2, uintptr(rgid), uintptr(egid), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1640,7 +1640,7 @@ func Setregid(rgid int, egid int) (err error) { func Setreuid(ruid int, euid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetreuid)), 2, uintptr(ruid), uintptr(euid), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1651,7 +1651,7 @@ func Setsid() (pid int, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetsid)), 0, 0, 0, 0, 0, 0, 0) pid = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1661,7 +1661,7 @@ func Setsid() (pid int, err error) { func Setuid(uid int) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procSetuid)), 1, uintptr(uid), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1671,7 +1671,7 @@ func Setuid(uid int) (err error) { func Shutdown(s int, how int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procshutdown)), 2, uintptr(s), uintptr(how), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1686,7 +1686,7 @@ func Stat(path string, stat *Stat_t) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procStat)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1701,7 +1701,7 @@ func Statvfs(path string, vfsstat *Statvfs_t) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procStatvfs)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(vfsstat)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1721,7 +1721,7 @@ func Symlink(path string, link string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSymlink)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1731,7 +1731,7 @@ func Symlink(path string, link string) (err error) { func Sync() (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSync)), 0, 0, 0, 0, 0, 0, 0) if e1 != 0 { - err = 
e1 + err = errnoErr(e1) } return } @@ -1742,7 +1742,7 @@ func Sysconf(which int) (n int64, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSysconf)), 1, uintptr(which), 0, 0, 0, 0, 0) n = int64(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1753,7 +1753,7 @@ func Times(tms *Tms) (ticks uintptr, err error) { r0, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procTimes)), 1, uintptr(unsafe.Pointer(tms)), 0, 0, 0, 0, 0) ticks = uintptr(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1768,7 +1768,7 @@ func Truncate(path string, length int64) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procTruncate)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1778,7 +1778,7 @@ func Truncate(path string, length int64) (err error) { func Fsync(fd int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFsync)), 1, uintptr(fd), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1788,7 +1788,7 @@ func Fsync(fd int) (err error) { func Ftruncate(fd int, length int64) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFtruncate)), 2, uintptr(fd), uintptr(length), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1806,7 +1806,7 @@ func Umask(mask int) (oldmask int) { func Uname(buf *Utsname) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procUname)), 1, uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1821,7 +1821,7 @@ func Unmount(target string, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procumount)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1836,7 +1836,7 @@ func Unlink(path string) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUnlink)), 1, uintptr(unsafe.Pointer(_p0)), 0, 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1851,7 +1851,7 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUnlinkat)), 3, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1861,7 +1861,7 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { func Ustat(dev int, ubuf *Ustat_t) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUstat)), 2, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1876,7 +1876,7 @@ func Utime(path string, buf *Utimbuf) (err error) { } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procUtime)), 2, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1886,7 +1886,7 @@ func Utime(path string, buf *Utimbuf) (err error) { func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_bind)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1896,7 +1896,7 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_connect)), 3, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0) if e1 != 0 { - 
err = e1 + err = errnoErr(e1) } return } @@ -1907,7 +1907,7 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procmmap)), 6, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flag), uintptr(fd), uintptr(pos)) ret = uintptr(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1917,7 +1917,7 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( func munmap(addr uintptr, length uintptr) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procmunmap)), 2, uintptr(addr), uintptr(length), 0, 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1928,7 +1928,7 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsendfile)), 4, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) written = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1942,7 +1942,7 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( } _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_sendto)), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(addrlen)) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1953,7 +1953,7 @@ func socket(domain int, typ int, proto int) (fd int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_socket)), 3, uintptr(domain), uintptr(typ), uintptr(proto), 0, 0, 0) fd = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1963,7 +1963,7 @@ func socket(domain int, typ int, proto int) (fd int, err error) { func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&proc__xnet_socketpair)), 4, uintptr(domain), uintptr(typ), uintptr(proto), uintptr(unsafe.Pointer(fd)), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1978,7 +1978,7 @@ func write(fd int, p []byte) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procwrite)), 3, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1988,7 +1988,7 @@ func write(fd int, p []byte) (n int, err error) { func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_getsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -1998,7 +1998,7 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { _, _, e1 := rawSysvicall6(uintptr(unsafe.Pointer(&procgetpeername)), 3, uintptr(fd), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2008,7 +2008,7 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsetsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(vallen), 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2023,7 +2023,7 @@ func recvfrom(fd int, p []byte, flags int, from 
*RawSockaddrAny, fromlen *_Sockl r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procrecvfrom)), 6, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2034,7 +2034,7 @@ func port_create() (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2045,7 +2045,7 @@ func port_associate(port int, source int, object uintptr, events int, user *byte r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_associate)), 5, uintptr(port), uintptr(source), uintptr(object), uintptr(events), uintptr(unsafe.Pointer(user)), 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2056,7 +2056,7 @@ func port_dissociate(port int, source int, object uintptr) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_dissociate)), 3, uintptr(port), uintptr(source), uintptr(object), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2067,7 +2067,7 @@ func port_get(port int, pe *portEvent, timeout *Timespec) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_get)), 3, uintptr(port), uintptr(unsafe.Pointer(pe)), uintptr(unsafe.Pointer(timeout)), 0, 0, 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2078,7 +2078,7 @@ func port_getn(port int, pe *portEvent, max uint32, nget *uint32, timeout *Times r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_getn)), 5, uintptr(port), uintptr(unsafe.Pointer(pe)), uintptr(max), uintptr(unsafe.Pointer(nget)), uintptr(unsafe.Pointer(timeout)), 0) n = int(r0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2088,7 +2088,7 @@ func port_getn(port int, pe *portEvent, max uint32, nget *uint32, timeout *Times func putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procputmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(flags), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } @@ -2098,7 +2098,7 @@ func putmsg(fd int, clptr *strbuf, dataptr *strbuf, flags int) (err error) { func getmsg(fd int, clptr *strbuf, dataptr *strbuf, flags *int) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetmsg)), 4, uintptr(fd), uintptr(unsafe.Pointer(clptr)), uintptr(unsafe.Pointer(dataptr)), uintptr(unsafe.Pointer(flags)), 0, 0) if e1 != 0 { - err = e1 + err = errnoErr(e1) } return } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index c31681743..1d8fe1d4b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -40,17 +40,6 @@ func read(fd int, p []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func readlen(fd int, buf *byte, nbuf int) (n int, err error) { - r0, _, e1 := syscall_syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(buf)), uintptr(nbuf)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func write(fd int, p []byte) (n int, err error) { var _p0 unsafe.Pointer if len(p) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index c9c4ad031..9862853d3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -447,4 +447,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 12ff3417c..8901f0f4e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -369,4 +369,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index c3fb5e77a..6902c37ee 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -411,4 +411,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 358c847a4..a6d3dff81 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -314,4 +314,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 81c4849b1..b18f3f710 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -308,4 +308,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 202a57e90..0302e5e3d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -431,4 +431,5 @@ const ( SYS_PROCESS_MRELEASE = 4448 SYS_FUTEX_WAITV = 4449 SYS_SET_MEMPOLICY_HOME_NODE = 4450 + SYS_CACHESTAT = 4451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 1fbceb52d..6693ba4a0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -361,4 +361,5 @@ const ( SYS_PROCESS_MRELEASE = 5448 SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 + SYS_CACHESTAT = 5451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index b4ffb7a20..fd93f4987 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -361,4 +361,5 @@ const ( SYS_PROCESS_MRELEASE = 5448 SYS_FUTEX_WAITV = 5449 SYS_SET_MEMPOLICY_HOME_NODE = 5450 + SYS_CACHESTAT = 5451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 867985f9b..760ddcadc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -431,4 +431,5 @@ const ( SYS_PROCESS_MRELEASE = 4448 SYS_FUTEX_WAITV = 4449 
SYS_SET_MEMPOLICY_HOME_NODE = 4450 + SYS_CACHESTAT = 4451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index a8cce69ed..cff2b2555 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -438,4 +438,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index d44c5b39d..a4b2405d0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -410,4 +410,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 4214dd9c0..aca54b4e3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -410,4 +410,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index ef285c567..9d1738d64 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -315,4 +315,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index e6ed7d637..022878dc8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -376,4 +376,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 92f628ef4..4100a761c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -389,4 +389,5 @@ const ( SYS_PROCESS_MRELEASE = 448 SYS_FUTEX_WAITV = 449 SYS_SET_MEMPOLICY_HOME_NODE = 450 + SYS_CACHESTAT = 451 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 26ef52aaf..18aa70b42 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1977,7 +1977,7 @@ const ( NFT_MSG_GETFLOWTABLE = 0x17 NFT_MSG_DELFLOWTABLE = 0x18 NFT_MSG_GETRULE_RESET = 0x19 - NFT_MSG_MAX = 0x21 + NFT_MSG_MAX = 0x22 NFTA_LIST_UNSPEC = 0x0 NFTA_LIST_ELEM = 0x1 NFTA_HOOK_UNSPEC = 0x0 @@ -4499,7 +4499,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x145 + NL80211_ATTR_MAX = 0x146 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -4869,7 +4869,7 @@ const ( NL80211_CMD_LEAVE_IBSS = 0x2c NL80211_CMD_LEAVE_MESH = 0x45 NL80211_CMD_LEAVE_OCB = 0x6d - NL80211_CMD_MAX = 0x99 + NL80211_CMD_MAX = 0x9a NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29 NL80211_CMD_MODIFY_LINK_STA = 0x97 NL80211_CMD_NAN_MATCH = 0x78 @@ -5503,7 +5503,7 @@ 
const ( NL80211_RATE_INFO_HE_RU_ALLOC_52 = 0x1 NL80211_RATE_INFO_HE_RU_ALLOC_996 = 0x5 NL80211_RATE_INFO_HE_RU_ALLOC = 0x11 - NL80211_RATE_INFO_MAX = 0x16 + NL80211_RATE_INFO_MAX = 0x1d NL80211_RATE_INFO_MCS = 0x2 NL80211_RATE_INFO_SHORT_GI = 0x4 NL80211_RATE_INFO_VHT_MCS = 0x6 @@ -5868,3 +5868,18 @@ const ( VIRTIO_NET_HDR_GSO_UDP_L4 = 0x5 VIRTIO_NET_HDR_GSO_ECN = 0x80 ) + +type SchedAttr struct { + Size uint32 + Policy uint32 + Flags uint64 + Nice int32 + Priority uint32 + Runtime uint64 + Deadline uint64 + Period uint64 + Util_min uint32 + Util_max uint32 +} + +const SizeofSchedAttr = 0x38 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 83c69c119..1b4c97c32 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -733,6 +733,10 @@ const ( RISCV_HWPROBE_KEY_IMA_EXT_0 = 0x4 RISCV_HWPROBE_IMA_FD = 0x1 RISCV_HWPROBE_IMA_C = 0x2 + RISCV_HWPROBE_IMA_V = 0x4 + RISCV_HWPROBE_EXT_ZBA = 0x8 + RISCV_HWPROBE_EXT_ZBB = 0x10 + RISCV_HWPROBE_EXT_ZBS = 0x20 RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go index a52e0331d..9cabbb694 100644 --- a/vendor/golang.org/x/sys/windows/exec_windows.go +++ b/vendor/golang.org/x/sys/windows/exec_windows.go @@ -22,7 +22,7 @@ import ( // but only if there is space or tab inside s. func EscapeArg(s string) string { if len(s) == 0 { - return "\"\"" + return `""` } n := len(s) hasSpace := false @@ -35,7 +35,7 @@ func EscapeArg(s string) string { } } if hasSpace { - n += 2 + n += 2 // Reserve space for quotes. } if n == len(s) { return s @@ -82,20 +82,68 @@ func EscapeArg(s string) string { // in CreateProcess's CommandLine argument, CreateService/ChangeServiceConfig's BinaryPathName argument, // or any program that uses CommandLineToArgv. func ComposeCommandLine(args []string) string { - var commandLine string - for i := range args { - if i > 0 { - commandLine += " " - } - commandLine += EscapeArg(args[i]) + if len(args) == 0 { + return "" } - return commandLine + + // Per https://learn.microsoft.com/en-us/windows/win32/api/shellapi/nf-shellapi-commandlinetoargvw: + // “This function accepts command lines that contain a program name; the + // program name can be enclosed in quotation marks or not.” + // + // Unfortunately, it provides no means of escaping interior quotation marks + // within that program name, and we have no way to report them here. + prog := args[0] + mustQuote := len(prog) == 0 + for i := 0; i < len(prog); i++ { + c := prog[i] + if c <= ' ' || (c == '"' && i == 0) { + // Force quotes for not only the ASCII space and tab as described in the + // MSDN article, but also ASCII control characters. + // The documentation for CommandLineToArgvW doesn't say what happens when + // the first argument is not a valid program name, but it empirically + // seems to drop unquoted control characters. + mustQuote = true + break + } + } + var commandLine []byte + if mustQuote { + commandLine = make([]byte, 0, len(prog)+2) + commandLine = append(commandLine, '"') + for i := 0; i < len(prog); i++ { + c := prog[i] + if c == '"' { + // This quote would interfere with our surrounding quotes. + // We have no way to report an error, so just strip out + // the offending character instead. 
+ continue + } + commandLine = append(commandLine, c) + } + commandLine = append(commandLine, '"') + } else { + if len(args) == 1 { + // args[0] is a valid command line representing itself. + // No need to allocate a new slice or string for it. + return prog + } + commandLine = []byte(prog) + } + + for _, arg := range args[1:] { + commandLine = append(commandLine, ' ') + // TODO(bcmills): since we're already appending to a slice, it would be nice + // to avoid the intermediate allocations of EscapeArg. + // Perhaps we can factor out an appendEscapedArg function. + commandLine = append(commandLine, EscapeArg(arg)...) + } + return string(commandLine) } // DecomposeCommandLine breaks apart its argument command line into unescaped parts using CommandLineToArgv, // as gathered from GetCommandLine, QUERY_SERVICE_CONFIG's BinaryPathName argument, or elsewhere that // command lines are passed around. -// DecomposeCommandLine returns error if commandLine contains NUL. +// DecomposeCommandLine returns an error if commandLine contains NUL. func DecomposeCommandLine(commandLine string) ([]string, error) { if len(commandLine) == 0 { return []string{}, nil @@ -105,18 +153,35 @@ func DecomposeCommandLine(commandLine string) ([]string, error) { return nil, errorspkg.New("string with NUL passed to DecomposeCommandLine") } var argc int32 - argv, err := CommandLineToArgv(&utf16CommandLine[0], &argc) + argv, err := commandLineToArgv(&utf16CommandLine[0], &argc) if err != nil { return nil, err } defer LocalFree(Handle(unsafe.Pointer(argv))) + var args []string - for _, v := range (*argv)[:argc] { - args = append(args, UTF16ToString((*v)[:])) + for _, p := range unsafe.Slice(argv, argc) { + args = append(args, UTF16PtrToString(p)) } return args, nil } +// CommandLineToArgv parses a Unicode command line string and sets +// argc to the number of parsed arguments. +// +// The returned memory should be freed using a single call to LocalFree. +// +// Note that although the return type of CommandLineToArgv indicates 8192 +// entries of up to 8192 characters each, the actual count of parsed arguments +// may exceed 8192, and the documentation for CommandLineToArgvW does not mention +// any bound on the lengths of the individual argument strings. +// (See https://go.dev/issue/63236.) +func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { + argp, err := commandLineToArgv(cmd, argc) + argv = (*[8192]*[8192]uint16)(unsafe.Pointer(argp)) + return argv, err +} + func CloseOnExec(fd Handle) { SetHandleInformation(Handle(fd), HANDLE_FLAG_INHERIT, 0) } diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index d414ef13b..26be94a8a 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -7,8 +7,6 @@ package windows import ( "syscall" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) const ( @@ -1341,21 +1339,14 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() sdLen = min } - var src []byte - h := (*unsafeheader.Slice)(unsafe.Pointer(&src)) - h.Data = unsafe.Pointer(selfRelativeSD) - h.Len = sdLen - h.Cap = sdLen - + src := unsafe.Slice((*byte)(unsafe.Pointer(selfRelativeSD)), sdLen) + // SECURITY_DESCRIPTOR has pointers in it, which means checkptr expects for it to + // be aligned properly. 
When we're copying a Windows-allocated struct to a + // Go-allocated one, make sure that the Go allocation is aligned to the + // pointer size. const psize = int(unsafe.Sizeof(uintptr(0))) - - var dst []byte - h = (*unsafeheader.Slice)(unsafe.Pointer(&dst)) alloc := make([]uintptr, (sdLen+psize-1)/psize) - h.Data = (*unsafeheader.Slice)(unsafe.Pointer(&alloc)).Data - h.Len = sdLen - h.Cap = sdLen - + dst := unsafe.Slice((*byte)(unsafe.Pointer(&alloc[0])), sdLen) copy(dst, src) return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&dst[0])) } diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 373d16388..35cfc57ca 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -15,8 +15,6 @@ import ( "time" "unicode/utf16" "unsafe" - - "golang.org/x/sys/internal/unsafeheader" ) type Handle uintptr @@ -216,7 +214,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath //sys TerminateProcess(handle Handle, exitcode uint32) (err error) //sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) -//sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW +//sys getStartupInfo(startupInfo *StartupInfo) = GetStartupInfoW //sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) //sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) //sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] @@ -240,7 +238,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW //sys GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) = kernel32.GetFileAttributesExW //sys GetCommandLine() (cmd *uint16) = kernel32.GetCommandLineW -//sys CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW +//sys commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW //sys LocalFree(hmem Handle) (handle Handle, err error) [failretval!=0] //sys LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) //sys SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) @@ -299,12 +297,15 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, event Handle, asynchronous bool) (regerrno error) = advapi32.RegNotifyChangeKeyValue //sys GetCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId //sys ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) = kernel32.ProcessIdToSessionId +//sys ClosePseudoConsole(console Handle) = kernel32.ClosePseudoConsole +//sys createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) = kernel32.CreatePseudoConsole //sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info 
*ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo //sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW +//sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole //sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot //sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW //sys Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32NextW @@ -437,6 +438,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmGetWindowAttribute //sys DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmSetWindowAttribute +// Windows Multimedia API +//sys TimeBeginPeriod (period uint32) (err error) [failretval != 0] = winmm.timeBeginPeriod +//sys TimeEndPeriod (period uint32) (err error) [failretval != 0] = winmm.timeEndPeriod + // syscall interface implementation for other packages // GetCurrentProcess returns the handle for the current process. @@ -1624,6 +1629,11 @@ func SetConsoleCursorPosition(console Handle, position Coord) error { return setConsoleCursorPosition(console, *((*uint32)(unsafe.Pointer(&position)))) } +func GetStartupInfo(startupInfo *StartupInfo) error { + getStartupInfo(startupInfo) + return nil +} + func (s NTStatus) Errno() syscall.Errno { return rtlNtStatusToDosErrorNoTeb(s) } @@ -1658,12 +1668,8 @@ func NewNTUnicodeString(s string) (*NTUnicodeString, error) { // Slice returns a uint16 slice that aliases the data in the NTUnicodeString. func (s *NTUnicodeString) Slice() []uint16 { - var slice []uint16 - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) - hdr.Data = unsafe.Pointer(s.Buffer) - hdr.Len = int(s.Length) - hdr.Cap = int(s.MaximumLength) - return slice + slice := unsafe.Slice(s.Buffer, s.MaximumLength) + return slice[:s.Length] } func (s *NTUnicodeString) String() string { @@ -1686,12 +1692,8 @@ func NewNTString(s string) (*NTString, error) { // Slice returns a byte slice that aliases the data in the NTString. func (s *NTString) Slice() []byte { - var slice []byte - hdr := (*unsafeheader.Slice)(unsafe.Pointer(&slice)) - hdr.Data = unsafe.Pointer(s.Buffer) - hdr.Len = int(s.Length) - hdr.Cap = int(s.MaximumLength) - return slice + slice := unsafe.Slice(s.Buffer, s.MaximumLength) + return slice[:s.Length] } func (s *NTString) String() string { @@ -1743,10 +1745,7 @@ func LoadResourceData(module, resInfo Handle) (data []byte, err error) { if err != nil { return } - h := (*unsafeheader.Slice)(unsafe.Pointer(&data)) - h.Data = unsafe.Pointer(ptr) - h.Len = int(size) - h.Cap = int(size) + data = unsafe.Slice((*byte)(unsafe.Pointer(ptr)), size) return } @@ -1817,3 +1816,17 @@ type PSAPI_WORKING_SET_EX_INFORMATION struct { // A PSAPI_WORKING_SET_EX_BLOCK union that indicates the attributes of the page at VirtualAddress. VirtualAttributes PSAPI_WORKING_SET_EX_BLOCK } + +// CreatePseudoConsole creates a windows pseudo console. 
+func CreatePseudoConsole(size Coord, in Handle, out Handle, flags uint32, pconsole *Handle) error { + // We need this wrapper to manually cast Coord to uint32. The autogenerated wrappers only + // accept arguments that can be casted to uintptr, and Coord can't. + return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), in, out, flags, pconsole) +} + +// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`. +func ResizePseudoConsole(pconsole Handle, size Coord) error { + // We need this wrapper to manually cast Coord to uint32. The autogenerated wrappers only + // accept arguments that can be casted to uintptr, and Coord can't. + return resizePseudoConsole(pconsole, *((*uint32)(unsafe.Pointer(&size)))) +} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 88e62a638..b88dc7c85 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -247,6 +247,7 @@ const ( PROC_THREAD_ATTRIBUTE_MITIGATION_POLICY = 0x00020007 PROC_THREAD_ATTRIBUTE_UMS_THREAD = 0x00030006 PROC_THREAD_ATTRIBUTE_PROTECTION_LEVEL = 0x0002000b + PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x00020016 ) const ( @@ -2139,6 +2140,12 @@ const ( ENABLE_LVB_GRID_WORLDWIDE = 0x10 ) +// Pseudo console related constants used for the flags parameter to +// CreatePseudoConsole. See: https://learn.microsoft.com/en-us/windows/console/createpseudoconsole +const ( + PSEUDOCONSOLE_INHERIT_CURSOR = 0x1 +) + type Coord struct { X int16 Y int16 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 566dd3e31..8b1688de4 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -55,6 +55,7 @@ var ( moduser32 = NewLazySystemDLL("user32.dll") moduserenv = NewLazySystemDLL("userenv.dll") modversion = NewLazySystemDLL("version.dll") + modwinmm = NewLazySystemDLL("winmm.dll") modwintrust = NewLazySystemDLL("wintrust.dll") modws2_32 = NewLazySystemDLL("ws2_32.dll") modwtsapi32 = NewLazySystemDLL("wtsapi32.dll") @@ -187,6 +188,7 @@ var ( procCancelIo = modkernel32.NewProc("CancelIo") procCancelIoEx = modkernel32.NewProc("CancelIoEx") procCloseHandle = modkernel32.NewProc("CloseHandle") + procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW") procCreateEventExW = modkernel32.NewProc("CreateEventExW") @@ -201,6 +203,7 @@ var ( procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") procCreatePipe = modkernel32.NewProc("CreatePipe") procCreateProcessW = modkernel32.NewProc("CreateProcessW") + procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole") procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") @@ -327,6 +330,7 @@ var ( procReleaseMutex = modkernel32.NewProc("ReleaseMutex") procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") procResetEvent = modkernel32.NewProc("ResetEvent") + procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") procResumeThread = modkernel32.NewProc("ResumeThread") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") procSetConsoleCursorPosition = 
modkernel32.NewProc("SetConsoleCursorPosition") @@ -468,6 +472,8 @@ var ( procGetFileVersionInfoSizeW = modversion.NewProc("GetFileVersionInfoSizeW") procGetFileVersionInfoW = modversion.NewProc("GetFileVersionInfoW") procVerQueryValueW = modversion.NewProc("VerQueryValueW") + proctimeBeginPeriod = modwinmm.NewProc("timeBeginPeriod") + proctimeEndPeriod = modwinmm.NewProc("timeEndPeriod") procWinVerifyTrustEx = modwintrust.NewProc("WinVerifyTrustEx") procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") @@ -1630,6 +1636,11 @@ func CloseHandle(handle Handle) (err error) { return } +func ClosePseudoConsole(console Handle) { + syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(console), 0, 0) + return +} + func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) { r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0) if r1 == 0 { @@ -1759,6 +1770,14 @@ func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityA return } +func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) { + r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)), 0) + if r0 != 0 { + hr = syscall.Errno(r0) + } + return +} + func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) if r1&0xff == 0 { @@ -2367,11 +2386,8 @@ func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uin return } -func GetStartupInfo(startupInfo *StartupInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) - if r1 == 0 { - err = errnoErr(e1) - } +func getStartupInfo(startupInfo *StartupInfo) { + syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) return } @@ -2862,6 +2878,14 @@ func ResetEvent(event Handle) (err error) { return } +func resizePseudoConsole(pconsole Handle, size uint32) (hr error) { + r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(pconsole), uintptr(size), 0) + if r0 != 0 { + hr = syscall.Errno(r0) + } + return +} + func ResumeThread(thread Handle) (ret uint32, err error) { r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) ret = uint32(r0) @@ -3820,9 +3844,9 @@ func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (er return } -func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { +func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) { r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) - argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0)) + argv = (**uint16)(unsafe.Pointer(r0)) if argv == nil { err = errnoErr(e1) } @@ -4017,6 +4041,22 @@ func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPoint return } +func TimeBeginPeriod(period uint32) (err error) { + r1, _, e1 := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + +func TimeEndPeriod(period uint32) (err error) { + r1, _, e1 := 
syscall.Syscall(proctimeEndPeriod.Addr(), 1, uintptr(period), 0, 0) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) { r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data))) if r0 != 0 { diff --git a/vendor/golang.org/x/text/unicode/norm/trie.go b/vendor/golang.org/x/text/unicode/norm/trie.go index 423386bf4..e4250ae22 100644 --- a/vendor/golang.org/x/text/unicode/norm/trie.go +++ b/vendor/golang.org/x/text/unicode/norm/trie.go @@ -29,7 +29,7 @@ var ( nfkcData = newNfkcTrie(0) ) -// lookupValue determines the type of block n and looks up the value for b. +// lookup determines the type of block n and looks up the value for b. // For n < t.cutoff, the block is a simple lookup table. Otherwise, the block // is a list of ranges with an accompanying value. Given a matching range r, // the value for b is by r.value + (b - r.lo) * stride. diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go index 0a6304d51..5dfff4cb2 100644 --- a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go +++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go @@ -8,6 +8,17 @@ // // For product documentation, see: https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials // +// # Library status +// +// These client libraries are officially supported by Google. However, this +// library is considered complete and is in maintenance mode. This means +// that we will address critical bugs and security issues but will not add +// any new features. +// +// When possible, we recommend using our newer +// [Cloud Client Libraries for Go](https://pkg.go.dev/cloud.google.com/go) +// that are still actively being worked and iterated on. +// // # Creating a client // // Usage example: @@ -17,24 +28,26 @@ // ctx := context.Background() // iamcredentialsService, err := iamcredentials.NewService(ctx) // -// In this example, Google Application Default Credentials are used for authentication. -// -// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// In this example, Google Application Default Credentials are used for +// authentication. For information on how to create and obtain Application +// Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // // # Other authentication options // -// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: +// To use an API key for authentication (note: some APIs do not support API +// keys), use [google.golang.org/api/option.WithAPIKey]: // // iamcredentialsService, err := iamcredentials.NewService(ctx, option.WithAPIKey("AIza...")) // -// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth +// flow, use [google.golang.org/api/option.WithTokenSource]: // // config := &oauth2.Config{...} // // ... // token, err := config.Exchange(ctx, ...) 
// iamcredentialsService, err := iamcredentials.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // -// See https://godoc.org/google.golang.org/api/option/ for details on options. +// See [google.golang.org/api/option.ClientOption] for details on options. package iamcredentials // import "google.golang.org/api/iamcredentials/v1" import ( diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index 92b3acf6e..05165f333 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -78,9 +78,8 @@ const ( // met: // // (1) At least one of the following is true: -// (a) No scope is provided -// (b) Scope for self-signed JWT flow is enabled -// (c) Audiences are explicitly provided by users +// (a) Scope for self-signed JWT flow is enabled +// (b) Audiences are explicitly provided by users // (2) No service account impersontation // // - Otherwise, executes standard OAuth 2.0 flow diff --git a/vendor/google.golang.org/api/internal/s2a.go b/vendor/google.golang.org/api/internal/s2a.go index c5b421f55..c70f2419b 100644 --- a/vendor/google.golang.org/api/internal/s2a.go +++ b/vendor/google.golang.org/api/internal/s2a.go @@ -13,7 +13,7 @@ import ( "cloud.google.com/go/compute/metadata" ) -const configEndpointSuffix = "googleAutoMtlsConfiguration" +const configEndpointSuffix = "instance/platform-security/auto-mtls-configuration" // The period an MTLS config can be reused before needing refresh. var configExpiry = time.Hour diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 3a3874df1..84f9302dc 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -9,6 +9,8 @@ import ( "crypto/tls" "errors" "net/http" + "os" + "strconv" "golang.org/x/oauth2" "golang.org/x/oauth2/google" @@ -16,6 +18,10 @@ import ( "google.golang.org/grpc" ) +const ( + newAuthLibEnVar = "GOOGLE_API_GO_EXPERIMENTAL_USE_NEW_AUTH_LIB" +) + // DialSettings holds information needed to establish a connection with a // Google API service. type DialSettings struct { @@ -47,6 +53,7 @@ type DialSettings struct { ImpersonationConfig *impersonate.Config EnableDirectPath bool EnableDirectPathXds bool + EnableNewAuthLibrary bool AllowNonDefaultServiceAccount bool // Google API system parameters. For more information please read: @@ -77,6 +84,16 @@ func (ds *DialSettings) HasCustomAudience() bool { return len(ds.Audiences) > 0 } +func (ds *DialSettings) IsNewAuthLibraryEnabled() bool { + if ds.EnableNewAuthLibrary { + return true + } + if b, err := strconv.ParseBool(os.Getenv(newAuthLibEnVar)); err == nil { + return b + } + return false +} + // Validate reports an error if ds is invalid. func (ds *DialSettings) Validate() error { if ds.SkipValidation { diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 06fd41703..0674cb4f6 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. 
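// Editorial sketch, not part of the vendored patch: how the new
// GOOGLE_API_GO_EXPERIMENTAL_USE_NEW_AUTH_LIB toggle added to DialSettings
// above is interpreted when the EnableNewAuthLibrary option is not set. Values
// that strconv.ParseBool rejects leave the new auth library disabled; the test
// values below are illustrative.
package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	const envVar = "GOOGLE_API_GO_EXPERIMENTAL_USE_NEW_AUTH_LIB"
	for _, v := range []string{"1", "true", "FALSE", "", "yes"} {
		os.Setenv(envVar, v)
		// Mirrors DialSettings.IsNewAuthLibraryEnabled with the option unset:
		// only a cleanly parsed boolean can turn the feature on.
		b, err := strconv.ParseBool(os.Getenv(envVar))
		fmt.Printf("%-7q -> enabled=%v\n", v, err == nil && b)
	}
}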
-const Version = "0.138.0" +const Version = "0.149.0" diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go index 3b8461d1d..b2b249eec 100644 --- a/vendor/google.golang.org/api/option/internaloption/internaloption.go +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -150,6 +150,19 @@ func (w *withCreds) Apply(o *internal.DialSettings) { o.InternalCredentials = (*google.Credentials)(w) } +// EnableNewAuthLibrary returns a ClientOption that specifies if libraries in this +// module to delegate auth to our new library. This option will be removed in +// the future once all clients have been moved to the new auth layer. +func EnableNewAuthLibrary() option.ClientOption { + return enableNewAuthLibrary(true) +} + +type enableNewAuthLibrary bool + +func (w enableNewAuthLibrary) Apply(o *internal.DialSettings) { + o.EnableNewAuthLibrary = bool(w) +} + // EmbeddableAdapter is a no-op option.ClientOption that allow libraries to // create their own client options by embedding this type into their own // client-specific option wrapper. See example for usage. diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 621207118..2af5c30f4 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -26,7 +26,7 @@ "description": "Stores and retrieves potentially large, immutable data objects.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "etag": "\"39353535313838393033333032363632303533\"", + "etag": "\"3133333835393639383131353638313238353437\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -446,6 +446,12 @@ "project" ], "parameters": { + "enableObjectRetention": { + "default": "false", + "description": "When set to true, object retention is enabled for this bucket.", + "location": "query", + "type": "boolean" + }, "predefinedAcl": { "description": "Apply a predefined set of access controls to this bucket.", "enum": [ @@ -1138,6 +1144,301 @@ } } }, + "managedFolders": { + "methods": { + "delete": { + "description": "Permanently deletes a managed folder.", + "httpMethod": "DELETE", + "id": "storage.managedFolders.delete", + "parameterOrder": [ + "bucket", + "managedFolder" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "If set, only deletes the managed folder if its metageneration matches this value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "If set, only deletes the managed folder if its metageneration does not match this value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "managedFolder": { + "description": "The managed folder name/path.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders/{managedFolder}", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": 
"Returns metadata of the specified managed folder.", + "httpMethod": "GET", + "id": "storage.managedFolders.get", + "parameterOrder": [ + "bucket", + "managedFolder" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the return of the managed folder metadata conditional on whether the managed folder's current metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the return of the managed folder metadata conditional on whether the managed folder's current metageneration does not match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "managedFolder": { + "description": "The managed folder name/path.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders/{managedFolder}", + "response": { + "$ref": "ManagedFolder" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "getIamPolicy": { + "description": "Returns an IAM policy for the specified managed folder.", + "httpMethod": "GET", + "id": "storage.managedFolders.getIamPolicy", + "parameterOrder": [ + "bucket", + "managedFolder" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "managedFolder": { + "description": "The managed folder name/path.", + "location": "path", + "required": true, + "type": "string" + }, + "optionsRequestedPolicyVersion": { + "description": "The IAM policy format version to be returned. If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.", + "format": "int32", + "location": "query", + "minimum": "1", + "type": "integer" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders/{managedFolder}/iam", + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "insert": { + "description": "Creates a new managed folder.", + "httpMethod": "POST", + "id": "storage.managedFolders.insert", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders", + "request": { + "$ref": "ManagedFolder" + }, + "response": { + "$ref": "ManagedFolder" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "description": "Lists managed folders in the given bucket.", + "httpMethod": "GET", + "id": "storage.managedFolders.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "pageSize": { + "description": "Maximum number of items return in a single page of responses.", + "format": "int32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + }, + "prefix": { + "description": "The managed folder name/path prefix to filter the output list of results.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders", + "response": { + "$ref": "ManagedFolders" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "setIamPolicy": { + "description": "Updates an IAM policy for the specified managed folder.", + "httpMethod": "PUT", + "id": "storage.managedFolders.setIamPolicy", + "parameterOrder": [ + "bucket", + "managedFolder" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "managedFolder": { + "description": "The managed folder name/path.", + "location": "path", + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders/{managedFolder}/iam", + "request": { + "$ref": "Policy" + }, + "response": { + "$ref": "Policy" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, + "testIamPermissions": { + "description": "Tests a set of permissions on the given managed folder to see which, if any, are held by the caller.", + "httpMethod": "GET", + "id": "storage.managedFolders.testIamPermissions", + "parameterOrder": [ + "bucket", + "managedFolder", + "permissions" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket containing the managed folder.", + "location": "path", + "required": true, + "type": "string" + }, + "managedFolder": { + "description": "The managed folder name/path.", + "location": "path", + "required": true, + "type": "string" + }, + "permissions": { + "description": "Permissions to test.", + "location": "query", + "repeated": true, + "required": true, + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/managedFolders/{managedFolder}/iam/testPermissions", + "response": { + "$ref": "TestIamPermissionsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, "notifications": { "methods": { "delete": { @@ -1572,6 +1873,34 @@ }, "objects": { "methods": { + "bulkRestore": { + "description": "Initiates a long-running bulk restore operation on the specified bucket.", + "httpMethod": "POST", + "id": "storage.objects.bulkRestore", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/o/bulkRestore", + "request": { + "$ref": "BulkRestoreObjectsRequest" + }, + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, "compose": { "description": "Concatenates a list of existing objects into a new object in the same bucket.", "httpMethod": "POST", @@ -1925,6 +2254,11 @@ "location": "query", "type": "string" }, + "softDeleted": { + "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see Soft Delete.", + "location": "query", + "type": "boolean" + }, "userProject": { "description": "The project to be billed for this request. Required for Requester Pays buckets.", "location": "query", @@ -2136,6 +2470,11 @@ "location": "query", "type": "string" }, + "includeFoldersAsPrefixes": { + "description": "Only applicable if delimiter is set to '/'. 
If true, will also include folders and managed folders (besides objects) in the returned prefixes.", + "location": "query", + "type": "boolean" + }, "includeTrailingDelimiter": { "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", "location": "query", @@ -2177,6 +2516,11 @@ "location": "query", "type": "string" }, + "softDeleted": { + "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see Soft Delete.", + "location": "query", + "type": "boolean" + }, "startOffset": { "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", "location": "query", @@ -2257,6 +2601,11 @@ "required": true, "type": "string" }, + "overrideUnlockedRetention": { + "description": "Must be true to remove the retention configuration, reduce its unlocked retention period, or change its mode from unlocked to locked.", + "location": "query", + "type": "boolean" + }, "predefinedAcl": { "description": "Apply a predefined set of access controls to this object.", "enum": [ @@ -2309,6 +2658,94 @@ "https://www.googleapis.com/auth/devstorage.full_control" ] }, + "restore": { + "description": "Restores a soft-deleted object.", + "httpMethod": "POST", + "id": "storage.objects.restore", + "parameterOrder": [ + "bucket", + "object" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which the object resides.", + "location": "path", + "required": true, + "type": "string" + }, + "copySourceAcl": { + "description": "If true, copies the source object's ACL; otherwise, uses the bucket's default object ACL. The default is false.", + "location": "query", + "type": "boolean" + }, + "generation": { + "description": "Selects a specific revision of this object.", + "format": "int64", + "location": "query", + "required": true, + "type": "string" + }, + "ifGenerationMatch": { + "description": "Makes the operation conditional on whether the object's one live generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifGenerationNotMatch": { + "description": "Makes the operation conditional on whether none of the object's live generations match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationMatch": { + "description": "Makes the operation conditional on whether the object's one live metageneration matches the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "ifMetagenerationNotMatch": { + "description": "Makes the operation conditional on whether none of the object's live metagenerations match the given value.", + "format": "int64", + "location": "query", + "type": "string" + }, + "object": { + "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + "location": "path", + "required": true, + "type": "string" + }, + "projection": { + "description": "Set of properties to return. 
Defaults to full.", + "enum": [ + "full", + "noAcl" + ], + "enumDescriptions": [ + "Include all properties.", + "Omit the owner, acl property." + ], + "location": "query", + "type": "string" + }, + "userProject": { + "description": "The project to be billed for this request. Required for Requester Pays buckets.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/o/{object}/restore", + "request": { + "$ref": "Object" + }, + "response": { + "$ref": "Object" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control" + ] + }, "rewrite": { "description": "Rewrites a source object to a destination object. Optionally overrides metadata.", "httpMethod": "POST", @@ -2617,6 +3054,11 @@ "required": true, "type": "string" }, + "overrideUnlockedRetention": { + "description": "Must be true to remove the retention configuration, reduce its unlocked retention period, or change its mode from unlocked to locked.", + "location": "query", + "type": "boolean" + }, "predefinedAcl": { "description": "Apply a predefined set of access controls to this object.", "enum": [ @@ -2764,6 +3206,117 @@ } } }, + "operations": { + "methods": { + "cancel": { + "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed.", + "httpMethod": "POST", + "id": "storage.buckets.operations.cancel", + "parameterOrder": [ + "bucket", + "operationId" + ], + "parameters": { + "bucket": { + "description": "The parent bucket of the operation resource.", + "location": "path", + "required": true, + "type": "string" + }, + "operationId": { + "description": "The ID of the operation resource.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/operations/{operationId}/cancel", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "get": { + "description": "Gets the latest state of a long-running operation.", + "httpMethod": "GET", + "id": "storage.buckets.operations.get", + "parameterOrder": [ + "bucket", + "operationId" + ], + "parameters": { + "bucket": { + "description": "The parent bucket of the operation resource.", + "location": "path", + "required": true, + "type": "string" + }, + "operationId": { + "description": "The ID of the operation resource.", + "location": "path", + "required": true, + "type": "string" + } + }, + "path": "b/{bucket}/operations/{operationId}", + "response": { + "$ref": "GoogleLongrunningOperation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + }, + "list": { + "description": "Lists operations that match the specified filter in the request.", + "httpMethod": "GET", + "id": "storage.buckets.operations.list", + "parameterOrder": [ + "bucket" + ], + "parameters": { + "bucket": { + "description": "Name of the bucket in which to look for operations.", + "location": "path", + "required": true, + "type": "string" + }, + "filter": { + "description": "A filter to narrow down results to a preferred subset. 
The filtering language is documented in more detail in [AIP-160](https://google.aip.dev/160).", + "location": "query", + "type": "string" + }, + "pageSize": { + "description": "Maximum number of items to return in a single page of responses. Fewer total results may be returned than requested. The service uses this parameter or 100 items, whichever is smaller.", + "format": "int32", + "location": "query", + "minimum": "0", + "type": "integer" + }, + "pageToken": { + "description": "A previously-returned page token representing part of the larger set of results to view.", + "location": "query", + "type": "string" + } + }, + "path": "b/{bucket}/operations", + "response": { + "$ref": "GoogleLongrunningListOperationsResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/devstorage.full_control", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/devstorage.read_write" + ] + } + } + }, "projects": { "resources": { "hmacKeys": { @@ -3010,7 +3563,7 @@ } } }, - "revision": "20230710", + "revision": "20231012", "rootUrl": "https://storage.googleapis.com/", "schemas": { "Bucket": { @@ -3036,6 +3589,15 @@ "description": "Whether or not Autoclass is enabled on this bucket", "type": "boolean" }, + "terminalStorageClass": { + "description": "The storage class that objects in the bucket eventually transition to if they are not read for a certain length of time. Valid values are NEARLINE and ARCHIVE.", + "type": "string" + }, + "terminalStorageClassUpdateTime": { + "description": "A date and time in RFC 3339 format representing the time of the most recent update to \"terminalStorageClass\".", + "format": "date-time", + "type": "string" + }, "toggleTime": { "description": "A date and time in RFC 3339 format representing the instant at which \"enabled\" was last toggled.", "format": "date-time", @@ -3319,6 +3881,16 @@ "description": "The name of the bucket.", "type": "string" }, + "objectRetention": { + "description": "The bucket's object retention config.", + "properties": { + "mode": { + "description": "The bucket's object retention mode. Can be Enabled.", + "type": "string" + } + }, + "type": "object" + }, "owner": { "description": "The owner of the bucket. This is always the project team's owner group.", "properties": { @@ -3370,6 +3942,22 @@ "description": "The URI of this bucket.", "type": "string" }, + "softDeletePolicy": { + "description": "The bucket's soft delete policy, which defines the period of time that soft-deleted objects will be retained, and cannot be permanently deleted.", + "properties": { + "effectiveTime": { + "description": "Server-determined value that indicates the time from which the policy, or one with a greater retention, was effective. This value is in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "retentionDurationSeconds": { + "description": "The period of time in seconds, that soft-deleted objects in the bucket will be retained and cannot be permanently deleted.", + "format": "int64", + "type": "string" + } + }, + "type": "object" + }, "storageClass": { "description": "The bucket's default storage class, used whenever no storageClass is specified for a newly-created object. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE, COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. 
If this value is not specified when the bucket is created, it will default to STANDARD. For more information, see storage classes.", "type": "string" @@ -3525,6 +4113,38 @@ }, "type": "object" }, + "BulkRestoreObjectsRequest": { + "description": "A bulk restore objects request.", + "id": "BulkRestoreObjectsRequest", + "properties": { + "allowOverwrite": { + "description": "If false (default), the restore will not overwrite live objects with the same name at the destination. This means some deleted objects may be skipped. If true, live objects will be overwritten resulting in a noncurrent object (if versioning is enabled). If versioning is not enabled, overwriting the object will result in a soft-deleted object. In either case, if a noncurrent object already exists with the same name, a live version can be written without issue.", + "type": "boolean" + }, + "copySourceAcl": { + "description": "If true, copies the source object's ACL; otherwise, uses the bucket's default object ACL. The default is false.", + "type": "boolean" + }, + "matchGlobs": { + "description": "Restores only the objects matching any of the specified glob(s). If this parameter is not specified, all objects will be restored within the specified time range.", + "items": { + "type": "string" + }, + "type": "array" + }, + "softDeletedAfterTime": { + "description": "Restores only the objects that were soft-deleted after this time.", + "format": "date-time", + "type": "string" + }, + "softDeletedBeforeTime": { + "description": "Restores only the objects that were soft-deleted before this time.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, "Channel": { "description": "An notification channel used to watch for resource changes.", "id": "Channel", @@ -3656,6 +4276,86 @@ }, "type": "object" }, + "GoogleLongrunningListOperationsResponse": { + "description": "The response message for storage.buckets.operations.list.", + "id": "GoogleLongrunningListOperationsResponse", + "properties": { + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", + "type": "string" + }, + "operations": { + "description": "A list of operations that matches the specified filter in the request.", + "items": { + "$ref": "GoogleLongrunningOperation" + }, + "type": "array" + } + }, + "type": "object" + }, + "GoogleLongrunningOperation": { + "description": "This resource represents a long-running operation that is the result of a network API call.", + "id": "GoogleLongrunningOperation", + "properties": { + "done": { + "description": "If the value is \"false\", it means the operation is still in progress. If \"true\", the operation is completed, and either \"error\" or \"response\" is available.", + "type": "boolean" + }, + "error": { + "$ref": "GoogleRpcStatus", + "description": "The error result of the operation in case of failure or cancellation." + }, + "metadata": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "Service-specific metadata associated with the operation. It typically contains progress information and common metadata such as create time. Some services might not provide such metadata. 
Any method that returns a long-running operation should document the metadata type, if any.", + "type": "object" + }, + "name": { + "description": "The server-assigned name, which is only unique within the same service that originally returns it. If you use the default HTTP mapping, the \"name\" should be a resource name ending with \"operations/{operationId}\".", + "type": "string" + }, + "response": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "description": "The normal response of the operation in case of success. If the original method returns no data on success, such as \"Delete\", the response is google.protobuf.Empty. If the original method is standard Get/Create/Update, the response should be the resource. For other methods, the response should have the type \"XxxResponse\", where \"Xxx\" is the original method name. For example, if the original method name is \"TakeSnapshot()\", the inferred response type is \"TakeSnapshotResponse\".", + "type": "object" + } + }, + "type": "object" + }, + "GoogleRpcStatus": { + "description": "The \"Status\" type defines a logical error model that is suitable for different programming environments, including REST APIs and RPC APIs. It is used by [gRPC](https://github.com/grpc). Each \"Status\" message contains three pieces of data: error code, error message, and error details. You can find out more about this error model and how to work with it in the [API Design Guide](https://cloud.google.com/apis/design/errors).", + "id": "GoogleRpcStatus", + "properties": { + "code": { + "description": "The status code, which should be an enum value of google.rpc.Code.", + "format": "int32", + "type": "integer" + }, + "details": { + "description": "A list of messages that carry the error details. There is a common set of message types for APIs to use.", + "items": { + "additionalProperties": { + "description": "Properties of the object. Contains field @type with type URL.", + "type": "any" + }, + "type": "object" + }, + "type": "array" + }, + "message": { + "description": "A developer-facing error message, which should be in English.", + "type": "string" + } + }, + "type": "object" + }, "HmacKey": { "description": "JSON template to produce a JSON-style HMAC Key resource for Create responses.", "id": "HmacKey", @@ -3749,6 +4449,72 @@ }, "type": "object" }, + "ManagedFolder": { + "description": "A managed folder.", + "id": "ManagedFolder", + "properties": { + "bucket": { + "description": "The name of the bucket containing this managed folder.", + "type": "string" + }, + "createTime": { + "description": "The creation time of the managed folder in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, + "id": { + "description": "The ID of the managed folder, including the bucket name and managed folder name.", + "type": "string" + }, + "kind": { + "default": "storage#managedFolder", + "description": "The kind of item this is. For managed folders, this is always storage#managedFolder.", + "type": "string" + }, + "metageneration": { + "description": "The version of the metadata for this managed folder. Used for preconditions and for detecting changes in metadata.", + "format": "int64", + "type": "string" + }, + "name": { + "description": "The name of the managed folder. 
Required if not specified by URL parameter.", + "type": "string" + }, + "selfLink": { + "description": "The link to this managed folder.", + "type": "string" + }, + "updateTime": { + "description": "The last update time of the managed folder metadata in RFC 3339 format.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, + "ManagedFolders": { + "description": "A list of managed folders.", + "id": "ManagedFolders", + "properties": { + "items": { + "description": "The list of items.", + "items": { + "$ref": "ManagedFolder" + }, + "type": "array" + }, + "kind": { + "default": "storage#managedFolders", + "description": "The kind of item this is. For lists of managed folders, this is always storage#managedFolders.", + "type": "string" + }, + "nextPageToken": { + "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results.", + "type": "string" + } + }, + "type": "object" + }, "Notification": { "description": "A subscription to receive Google PubSub notifications.", "id": "Notification", @@ -3910,6 +4676,11 @@ "format": "int64", "type": "string" }, + "hardDeleteTime": { + "description": "This is the time (in the future) when the soft-deleted object will no longer be restorable. It is equal to the soft delete time plus the current soft delete retention duration of the bucket.", + "format": "date-time", + "type": "string" + }, "id": { "description": "The ID of the object, including the bucket name, object name, and generation number.", "type": "string" @@ -3962,6 +4733,21 @@ }, "type": "object" }, + "retention": { + "description": "A collection of object level retention parameters.", + "properties": { + "mode": { + "description": "The bucket's object retention mode, can only be Unlocked or Locked.", + "type": "string" + }, + "retainUntilTime": { + "description": "A time in RFC 3339 format until which object retention protects this object.", + "format": "date-time", + "type": "string" + } + }, + "type": "object" + }, "retentionExpirationTime": { "description": "A server-determined value that specifies the earliest time that the object's retention period expires. This value is in RFC 3339 format. Note 1: This field is not provided for objects with an active event-based hold, since retention expiration is unknown until the hold is removed. Note 2: This value can be provided even when temporary hold is set (so that the user can reason about policy without having to first unset the temporary hold).", "format": "date-time", @@ -3976,6 +4762,11 @@ "format": "uint64", "type": "string" }, + "softDeleteTime": { + "description": "The time at which the object became soft-deleted in RFC 3339 format.", + "format": "date-time", + "type": "string" + }, "storageClass": { "description": "Storage class of the object.", "type": "string" @@ -3990,7 +4781,7 @@ "type": "string" }, "timeDeleted": { - "description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.", + "description": "The time at which the object became noncurrent in RFC 3339 format. 
Will be returned if and only if this version of the object has been deleted.", "format": "date-time", "type": "string" }, @@ -4140,14 +4931,15 @@ "type": "object" }, "Policy": { - "description": "A bucket/object IAM policy.", + "description": "A bucket/object/managedFolder IAM policy.", "id": "Policy", "properties": { "bindings": { "annotations": { "required": [ "storage.buckets.setIamPolicy", - "storage.objects.setIamPolicy" + "storage.objects.setIamPolicy", + "storage.managedFolders.setIamPolicy" ] }, "description": "An association between a role, which comes with a set of permissions, and members who may assume that role.", @@ -4161,7 +4953,8 @@ "annotations": { "required": [ "storage.buckets.setIamPolicy", - "storage.objects.setIamPolicy" + "storage.objects.setIamPolicy", + "storage.managedFolders.setIamPolicy" ] }, "description": "A collection of identifiers for members who may assume the provided role. Recognized identifiers are as follows: \n- allUsers — A special identifier that represents anyone on the internet; with or without a Google account. \n- allAuthenticatedUsers — A special identifier that represents anyone who is authenticated with a Google account or a service account. \n- user:emailid — An email address that represents a specific account. For example, user:alice@gmail.com or user:joe@example.com. \n- serviceAccount:emailid — An email address that represents a service account. For example, serviceAccount:my-other-app@appspot.gserviceaccount.com . \n- group:emailid — An email address that represents a Google group. For example, group:admins@example.com. \n- domain:domain — A Google Apps domain name that represents all the users of that domain. For example, domain:google.com or domain:example.com. \n- projectOwner:projectid — Owners of the given project. For example, projectOwner:my-example-project \n- projectEditor:projectid — Editors of the given project. For example, projectEditor:my-example-project \n- projectViewer:projectid — Viewers of the given project. For example, projectViewer:my-example-project", @@ -4174,7 +4967,8 @@ "annotations": { "required": [ "storage.buckets.setIamPolicy", - "storage.objects.setIamPolicy" + "storage.objects.setIamPolicy", + "storage.managedFolders.setIamPolicy" ] }, "description": "The role to which members belong. Two types of roles are supported: new IAM roles, which grant permissions that do not map directly to those provided by ACLs, and legacy IAM roles, which do map directly to ACL permissions. All roles are of the format roles/storage.specificRole.\nThe new IAM roles are: \n- roles/storage.admin — Full control of Google Cloud Storage resources. \n- roles/storage.objectViewer — Read-Only access to Google Cloud Storage objects. \n- roles/storage.objectCreator — Access to create objects in Google Cloud Storage. \n- roles/storage.objectAdmin — Full control of Google Cloud Storage objects. The legacy IAM roles are: \n- roles/storage.legacyObjectReader — Read-only access to objects without listing. Equivalent to an ACL entry on an object with the READER role. \n- roles/storage.legacyObjectOwner — Read/write access to existing objects without listing. Equivalent to an ACL entry on an object with the OWNER role. \n- roles/storage.legacyBucketReader — Read access to buckets with object listing. Equivalent to an ACL entry on a bucket with the READER role. \n- roles/storage.legacyBucketWriter — Read access to buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the WRITER role. 
\n- roles/storage.legacyBucketOwner — Read and write access to existing buckets with object listing/creation/deletion. Equivalent to an ACL entry on a bucket with the OWNER role.", @@ -4196,7 +4990,7 @@ "type": "string" }, "resourceId": { - "description": "The ID of the resource to which this policy belongs. Will be of the form projects/_/buckets/bucket for buckets, and projects/_/buckets/bucket/objects/object for objects. A specific generation may be specified by appending #generationNumber to the end of the object name, e.g. projects/_/buckets/my-bucket/objects/data.txt#17. The current generation can be denoted with #0. This field is ignored on input.", + "description": "The ID of the resource to which this policy belongs. Will be of the form projects/_/buckets/bucket for buckets, projects/_/buckets/bucket/objects/object for objects, and projects/_/buckets/bucket/managedFolders/managedFolder. A specific generation may be specified by appending #generationNumber to the end of the object name, e.g. projects/_/buckets/my-bucket/objects/data.txt#17. The current generation can be denoted with #0. This field is ignored on input.", "type": "string" }, "version": { @@ -4258,7 +5052,7 @@ "type": "object" }, "TestIamPermissionsResponse": { - "description": "A storage.(buckets|objects).testIamPermissions response.", + "description": "A storage.(buckets|objects|managedFolders).testIamPermissions response.", "id": "TestIamPermissionsResponse", "properties": { "kind": { @@ -4267,7 +5061,7 @@ "type": "string" }, "permissions": { - "description": "The permissions held by the caller. Permissions are always of the format storage.resource.capability, where resource is one of buckets or objects. The supported permissions are as follows: \n- storage.buckets.delete — Delete bucket. \n- storage.buckets.get — Read bucket metadata. \n- storage.buckets.getIamPolicy — Read bucket IAM policy. \n- storage.buckets.create — Create bucket. \n- storage.buckets.list — List buckets. \n- storage.buckets.setIamPolicy — Update bucket IAM policy. \n- storage.buckets.update — Update bucket metadata. \n- storage.objects.delete — Delete object. \n- storage.objects.get — Read object data and metadata. \n- storage.objects.getIamPolicy — Read object IAM policy. \n- storage.objects.create — Create object. \n- storage.objects.list — List objects. \n- storage.objects.setIamPolicy — Update object IAM policy. \n- storage.objects.update — Update object metadata.", + "description": "The permissions held by the caller. Permissions are always of the format storage.resource.capability, where resource is one of buckets, objects, or managedFolders. The supported permissions are as follows: \n- storage.buckets.delete — Delete bucket. \n- storage.buckets.get — Read bucket metadata. \n- storage.buckets.getIamPolicy — Read bucket IAM policy. \n- storage.buckets.create — Create bucket. \n- storage.buckets.list — List buckets. \n- storage.buckets.setIamPolicy — Update bucket IAM policy. \n- storage.buckets.update — Update bucket metadata. \n- storage.objects.delete — Delete object. \n- storage.objects.get — Read object data and metadata. \n- storage.objects.getIamPolicy — Read object IAM policy. \n- storage.objects.create — Create object. \n- storage.objects.list — List objects. \n- storage.objects.setIamPolicy — Update object IAM policy. \n- storage.objects.update — Update object metadata. \n- storage.managedFolders.delete — Delete managed folder. \n- storage.managedFolders.get — Read managed folder metadata. 
\n- storage.managedFolders.getIamPolicy — Read managed folder IAM policy. \n- storage.managedFolders.create — Create managed folder. \n- storage.managedFolders.list — List managed folders. \n- storage.managedFolders.setIamPolicy — Update managed folder IAM policy.", "items": { "type": "string" }, diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 69a6e41e7..2b2aa81ec 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -10,6 +10,17 @@ // // For product documentation, see: https://developers.google.com/storage/docs/json_api/ // +// # Library status +// +// These client libraries are officially supported by Google. However, this +// library is considered complete and is in maintenance mode. This means +// that we will address critical bugs and security issues but will not add +// any new features. +// +// When possible, we recommend using our newer +// [Cloud Client Libraries for Go](https://pkg.go.dev/cloud.google.com/go) +// that are still actively being worked and iterated on. +// // # Creating a client // // Usage example: @@ -19,28 +30,31 @@ // ctx := context.Background() // storageService, err := storage.NewService(ctx) // -// In this example, Google Application Default Credentials are used for authentication. -// -// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. +// In this example, Google Application Default Credentials are used for +// authentication. For information on how to create and obtain Application +// Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // // # Other authentication options // -// By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: +// By default, all available scopes (see "Constants") are used to authenticate. +// To restrict scopes, use [google.golang.org/api/option.WithScopes]: // // storageService, err := storage.NewService(ctx, option.WithScopes(storage.DevstorageReadWriteScope)) // -// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: +// To use an API key for authentication (note: some APIs do not support API +// keys), use [google.golang.org/api/option.WithAPIKey]: // // storageService, err := storage.NewService(ctx, option.WithAPIKey("AIza...")) // -// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: +// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth +// flow, use [google.golang.org/api/option.WithTokenSource]: // // config := &oauth2.Config{...} // // ... // token, err := config.Exchange(ctx, ...) // storageService, err := storage.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // -// See https://godoc.org/google.golang.org/api/option/ for details on options. +// See [google.golang.org/api/option.ClientOption] for details on options. 
package storage // import "google.golang.org/api/storage/v1" import ( @@ -145,9 +159,11 @@ func New(client *http.Client) (*Service, error) { s.Buckets = NewBucketsService(s) s.Channels = NewChannelsService(s) s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s) + s.ManagedFolders = NewManagedFoldersService(s) s.Notifications = NewNotificationsService(s) s.ObjectAccessControls = NewObjectAccessControlsService(s) s.Objects = NewObjectsService(s) + s.Operations = NewOperationsService(s) s.Projects = NewProjectsService(s) return s, nil } @@ -165,12 +181,16 @@ type Service struct { DefaultObjectAccessControls *DefaultObjectAccessControlsService + ManagedFolders *ManagedFoldersService + Notifications *NotificationsService ObjectAccessControls *ObjectAccessControlsService Objects *ObjectsService + Operations *OperationsService + Projects *ProjectsService } @@ -217,6 +237,15 @@ type DefaultObjectAccessControlsService struct { s *Service } +func NewManagedFoldersService(s *Service) *ManagedFoldersService { + rs := &ManagedFoldersService{s: s} + return rs +} + +type ManagedFoldersService struct { + s *Service +} + func NewNotificationsService(s *Service) *NotificationsService { rs := &NotificationsService{s: s} return rs @@ -244,6 +273,15 @@ type ObjectsService struct { s *Service } +func NewOperationsService(s *Service) *OperationsService { + rs := &OperationsService{s: s} + return rs +} + +type OperationsService struct { + s *Service +} + func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} rs.HmacKeys = NewProjectsHmacKeysService(s) @@ -359,6 +397,9 @@ type Bucket struct { // Name: The name of the bucket. Name string `json:"name,omitempty"` + // ObjectRetention: The bucket's object retention config. + ObjectRetention *BucketObjectRetention `json:"objectRetention,omitempty"` + // Owner: The owner of the bucket. This is always the project team's // owner group. Owner *BucketOwner `json:"owner,omitempty"` @@ -389,6 +430,11 @@ type Bucket struct { // SelfLink: The URI of this bucket. SelfLink string `json:"selfLink,omitempty"` + // SoftDeletePolicy: The bucket's soft delete policy, which defines the + // period of time that soft-deleted objects will be retained, and cannot + // be permanently deleted. + SoftDeletePolicy *BucketSoftDeletePolicy `json:"softDeletePolicy,omitempty"` + // StorageClass: The bucket's default storage class, used whenever no // storageClass is specified for a newly-created object. This defines // how objects in the bucket are stored and determines the SLA and the @@ -444,6 +490,16 @@ type BucketAutoclass struct { // Enabled: Whether or not Autoclass is enabled on this bucket Enabled bool `json:"enabled,omitempty"` + // TerminalStorageClass: The storage class that objects in the bucket + // eventually transition to if they are not read for a certain length of + // time. Valid values are NEARLINE and ARCHIVE. + TerminalStorageClass string `json:"terminalStorageClass,omitempty"` + + // TerminalStorageClassUpdateTime: A date and time in RFC 3339 format + // representing the time of the most recent update to + // "terminalStorageClass". + TerminalStorageClassUpdateTime string `json:"terminalStorageClassUpdateTime,omitempty"` + // ToggleTime: A date and time in RFC 3339 format representing the // instant at which "enabled" was last toggled. 
ToggleTime string `json:"toggleTime,omitempty"` @@ -946,6 +1002,34 @@ func (s *BucketLogging) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BucketObjectRetention: The bucket's object retention config. +type BucketObjectRetention struct { + // Mode: The bucket's object retention mode. Can be Enabled. + Mode string `json:"mode,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Mode") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Mode") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *BucketObjectRetention) MarshalJSON() ([]byte, error) { + type NoMethod BucketObjectRetention + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BucketOwner: The owner of the bucket. This is always the project // team's owner group. type BucketOwner struct { @@ -1027,6 +1111,43 @@ func (s *BucketRetentionPolicy) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BucketSoftDeletePolicy: The bucket's soft delete policy, which +// defines the period of time that soft-deleted objects will be +// retained, and cannot be permanently deleted. +type BucketSoftDeletePolicy struct { + // EffectiveTime: Server-determined value that indicates the time from + // which the policy, or one with a greater retention, was effective. + // This value is in RFC 3339 format. + EffectiveTime string `json:"effectiveTime,omitempty"` + + // RetentionDurationSeconds: The period of time in seconds, that + // soft-deleted objects in the bucket will be retained and cannot be + // permanently deleted. + RetentionDurationSeconds int64 `json:"retentionDurationSeconds,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "EffectiveTime") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "EffectiveTime") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *BucketSoftDeletePolicy) MarshalJSON() ([]byte, error) { + type NoMethod BucketSoftDeletePolicy + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // BucketVersioning: The bucket's versioning configuration. type BucketVersioning struct { // Enabled: While set to true, versioning is fully enabled for this @@ -1282,6 +1403,59 @@ func (s *Buckets) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// BulkRestoreObjectsRequest: A bulk restore objects request. +type BulkRestoreObjectsRequest struct { + // AllowOverwrite: If false (default), the restore will not overwrite + // live objects with the same name at the destination. This means some + // deleted objects may be skipped. If true, live objects will be + // overwritten resulting in a noncurrent object (if versioning is + // enabled). If versioning is not enabled, overwriting the object will + // result in a soft-deleted object. In either case, if a noncurrent + // object already exists with the same name, a live version can be + // written without issue. + AllowOverwrite bool `json:"allowOverwrite,omitempty"` + + // CopySourceAcl: If true, copies the source object's ACL; otherwise, + // uses the bucket's default object ACL. The default is false. + CopySourceAcl bool `json:"copySourceAcl,omitempty"` + + // MatchGlobs: Restores only the objects matching any of the specified + // glob(s). If this parameter is not specified, all objects will be + // restored within the specified time range. + MatchGlobs []string `json:"matchGlobs,omitempty"` + + // SoftDeletedAfterTime: Restores only the objects that were + // soft-deleted after this time. + SoftDeletedAfterTime string `json:"softDeletedAfterTime,omitempty"` + + // SoftDeletedBeforeTime: Restores only the objects that were + // soft-deleted before this time. + SoftDeletedBeforeTime string `json:"softDeletedBeforeTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "AllowOverwrite") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "AllowOverwrite") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *BulkRestoreObjectsRequest) MarshalJSON() ([]byte, error) { + type NoMethod BulkRestoreObjectsRequest + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Channel: An notification channel used to watch for resource changes. 
type Channel struct { // Address: The address where notifications are delivered for this @@ -1498,6 +1672,150 @@ func (s *Expr) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// GoogleLongrunningListOperationsResponse: The response message for +// storage.buckets.operations.list. +type GoogleLongrunningListOperationsResponse struct { + // NextPageToken: The continuation token, used to page through large + // result sets. Provide this value in a subsequent request to return the + // next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // Operations: A list of operations that matches the specified filter in + // the request. + Operations []*GoogleLongrunningOperation `json:"operations,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "NextPageToken") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "NextPageToken") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleLongrunningListOperationsResponse) MarshalJSON() ([]byte, error) { + type NoMethod GoogleLongrunningListOperationsResponse + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleLongrunningOperation: This resource represents a long-running +// operation that is the result of a network API call. +type GoogleLongrunningOperation struct { + // Done: If the value is "false", it means the operation is still in + // progress. If "true", the operation is completed, and either "error" + // or "response" is available. + Done bool `json:"done,omitempty"` + + // Error: The error result of the operation in case of failure or + // cancellation. + Error *GoogleRpcStatus `json:"error,omitempty"` + + // Metadata: Service-specific metadata associated with the operation. It + // typically contains progress information and common metadata such as + // create time. Some services might not provide such metadata. Any + // method that returns a long-running operation should document the + // metadata type, if any. + Metadata googleapi.RawMessage `json:"metadata,omitempty"` + + // Name: The server-assigned name, which is only unique within the same + // service that originally returns it. If you use the default HTTP + // mapping, the "name" should be a resource name ending with + // "operations/{operationId}". + Name string `json:"name,omitempty"` + + // Response: The normal response of the operation in case of success. If + // the original method returns no data on success, such as "Delete", the + // response is google.protobuf.Empty. If the original method is standard + // Get/Create/Update, the response should be the resource. 
For other + // methods, the response should have the type "XxxResponse", where "Xxx" + // is the original method name. For example, if the original method name + // is "TakeSnapshot()", the inferred response type is + // "TakeSnapshotResponse". + Response googleapi.RawMessage `json:"response,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Done") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Done") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *GoogleLongrunningOperation) MarshalJSON() ([]byte, error) { + type NoMethod GoogleLongrunningOperation + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// GoogleRpcStatus: The "Status" type defines a logical error model that +// is suitable for different programming environments, including REST +// APIs and RPC APIs. It is used by gRPC (https://github.com/grpc). Each +// "Status" message contains three pieces of data: error code, error +// message, and error details. You can find out more about this error +// model and how to work with it in the API Design Guide +// (https://cloud.google.com/apis/design/errors). +type GoogleRpcStatus struct { + // Code: The status code, which should be an enum value of + // google.rpc.Code. + Code int64 `json:"code,omitempty"` + + // Details: A list of messages that carry the error details. There is a + // common set of message types for APIs to use. + Details []googleapi.RawMessage `json:"details,omitempty"` + + // Message: A developer-facing error message, which should be in + // English. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Code") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. 
+ NullFields []string `json:"-"` +} + +func (s *GoogleRpcStatus) MarshalJSON() ([]byte, error) { + type NoMethod GoogleRpcStatus + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // HmacKey: JSON template to produce a JSON-style HMAC Key resource for // Create responses. type HmacKey struct { @@ -1645,6 +1963,106 @@ func (s *HmacKeysMetadata) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ManagedFolder: A managed folder. +type ManagedFolder struct { + // Bucket: The name of the bucket containing this managed folder. + Bucket string `json:"bucket,omitempty"` + + // CreateTime: The creation time of the managed folder in RFC 3339 + // format. + CreateTime string `json:"createTime,omitempty"` + + // Id: The ID of the managed folder, including the bucket name and + // managed folder name. + Id string `json:"id,omitempty"` + + // Kind: The kind of item this is. For managed folders, this is always + // storage#managedFolder. + Kind string `json:"kind,omitempty"` + + // Metageneration: The version of the metadata for this managed folder. + // Used for preconditions and for detecting changes in metadata. + Metageneration int64 `json:"metageneration,omitempty,string"` + + // Name: The name of the managed folder. Required if not specified by + // URL parameter. + Name string `json:"name,omitempty"` + + // SelfLink: The link to this managed folder. + SelfLink string `json:"selfLink,omitempty"` + + // UpdateTime: The last update time of the managed folder metadata in + // RFC 3339 format. + UpdateTime string `json:"updateTime,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Bucket") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Bucket") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ManagedFolder) MarshalJSON() ([]byte, error) { + type NoMethod ManagedFolder + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// ManagedFolders: A list of managed folders. +type ManagedFolders struct { + // Items: The list of items. + Items []*ManagedFolder `json:"items,omitempty"` + + // Kind: The kind of item this is. For lists of managed folders, this is + // always storage#managedFolders. + Kind string `json:"kind,omitempty"` + + // NextPageToken: The continuation token, used to page through large + // result sets. Provide this value in a subsequent request to return the + // next page of results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. 
+ googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Items") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Items") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ManagedFolders) MarshalJSON() ([]byte, error) { + type NoMethod ManagedFolders + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // Notification: A subscription to receive Google PubSub notifications. type Notification struct { // CustomAttributes: An optional list of additional attributes to attach @@ -1812,6 +2230,12 @@ type Object struct { // versioning. Generation int64 `json:"generation,omitempty,string"` + // HardDeleteTime: This is the time (in the future) when the + // soft-deleted object will no longer be restorable. It is equal to the + // soft delete time plus the current soft delete retention duration of + // the bucket. + HardDeleteTime string `json:"hardDeleteTime,omitempty"` + // Id: The ID of the object, including the bucket name, object name, and // generation number. Id string `json:"id,omitempty"` @@ -1849,6 +2273,9 @@ type Object struct { // the object. Owner *ObjectOwner `json:"owner,omitempty"` + // Retention: A collection of object level retention parameters. + Retention *ObjectRetention `json:"retention,omitempty"` + // RetentionExpirationTime: A server-determined value that specifies the // earliest time that the object's retention period expires. This value // is in RFC 3339 format. Note 1: This field is not provided for objects @@ -1864,6 +2291,10 @@ type Object struct { // Size: Content-Length of the data in bytes. Size uint64 `json:"size,omitempty,string"` + // SoftDeleteTime: The time at which the object became soft-deleted in + // RFC 3339 format. + SoftDeleteTime string `json:"softDeleteTime,omitempty"` + // StorageClass: Storage class of the object. StorageClass string `json:"storageClass,omitempty"` @@ -1879,9 +2310,9 @@ type Object struct { // TimeCreated: The creation time of the object in RFC 3339 format. TimeCreated string `json:"timeCreated,omitempty"` - // TimeDeleted: The deletion time of the object in RFC 3339 format. Will - // be returned if and only if this version of the object has been - // deleted. + // TimeDeleted: The time at which the object became noncurrent in RFC + // 3339 format. Will be returned if and only if this version of the + // object has been deleted. TimeDeleted string `json:"timeDeleted,omitempty"` // TimeStorageClassUpdated: The time at which the object's storage class @@ -1990,6 +2421,39 @@ func (s *ObjectOwner) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// ObjectRetention: A collection of object level retention parameters. 
+type ObjectRetention struct { + // Mode: The bucket's object retention mode, can only be Unlocked or + // Locked. + Mode string `json:"mode,omitempty"` + + // RetainUntilTime: A time in RFC 3339 format until which object + // retention protects this object. + RetainUntilTime string `json:"retainUntilTime,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Mode") to + // unconditionally include in API requests. By default, fields with + // empty or default values are omitted from API requests. However, any + // non-pointer, non-interface field appearing in ForceSendFields will be + // sent to the server regardless of whether the field is empty or not. + // This may be used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "Mode") to include in API + // requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *ObjectRetention) MarshalJSON() ([]byte, error) { + type NoMethod ObjectRetention + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + // ObjectAccessControl: An access-control entry. type ObjectAccessControl struct { // Bucket: The name of the bucket. @@ -2187,7 +2651,7 @@ func (s *Objects) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// Policy: A bucket/object IAM policy. +// Policy: A bucket/object/managedFolder IAM policy. type Policy struct { // Bindings: An association between a role, which comes with a set of // permissions, and members who may assume that role. @@ -2201,8 +2665,9 @@ type Policy struct { Kind string `json:"kind,omitempty"` // ResourceId: The ID of the resource to which this policy belongs. Will - // be of the form projects/_/buckets/bucket for buckets, and - // projects/_/buckets/bucket/objects/object for objects. A specific + // be of the form projects/_/buckets/bucket for buckets, + // projects/_/buckets/bucket/objects/object for objects, and + // projects/_/buckets/bucket/managedFolders/managedFolder. A specific // generation may be specified by appending #generationNumber to the end // of the object name, e.g. // projects/_/buckets/my-bucket/objects/data.txt#17. The current @@ -2419,15 +2884,15 @@ func (s *ServiceAccount) MarshalJSON() ([]byte, error) { } // TestIamPermissionsResponse: A -// storage.(buckets|objects).testIamPermissions response. +// storage.(buckets|objects|managedFolders).testIamPermissions response. type TestIamPermissionsResponse struct { // Kind: The kind of item this is. Kind string `json:"kind,omitempty"` // Permissions: The permissions held by the caller. Permissions are // always of the format storage.resource.capability, where resource is - // one of buckets or objects. The supported permissions are as follows: - // + // one of buckets, objects, or managedFolders. The supported permissions + // are as follows: // - storage.buckets.delete — Delete bucket. // - storage.buckets.get — Read bucket metadata. // - storage.buckets.getIamPolicy — Read bucket IAM policy. @@ -2442,6 +2907,14 @@ type TestIamPermissionsResponse struct { // - storage.objects.list — List objects. 
// - storage.objects.setIamPolicy — Update object IAM policy. // - storage.objects.update — Update object metadata. + // - storage.managedFolders.delete — Delete managed folder. + // - storage.managedFolders.get — Read managed folder metadata. + // - storage.managedFolders.getIamPolicy — Read managed folder IAM + // policy. + // - storage.managedFolders.create — Create managed folder. + // - storage.managedFolders.list — List managed folders. + // - storage.managedFolders.setIamPolicy — Update managed folder IAM + // policy. Permissions []string `json:"permissions,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -3966,6 +4439,14 @@ func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsert return c } +// EnableObjectRetention sets the optional parameter +// "enableObjectRetention": When set to true, object retention is +// enabled for this bucket. +func (c *BucketsInsertCall) EnableObjectRetention(enableObjectRetention bool) *BucketsInsertCall { + c.urlParams_.Set("enableObjectRetention", fmt.Sprint(enableObjectRetention)) + return c +} + // PredefinedAcl sets the optional parameter "predefinedAcl": Apply a // predefined set of access controls to this bucket. // @@ -4139,6 +4620,12 @@ func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // "project" // ], // "parameters": { + // "enableObjectRetention": { + // "default": "false", + // "description": "When set to true, object retention is enabled for this bucket.", + // "location": "query", + // "type": "boolean" + // }, // "predefinedAcl": { // "description": "Apply a predefined set of access controls to this bucket.", // "enum": [ @@ -6627,6 +7114,1222 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) } +// method id "storage.managedFolders.delete": + +type ManagedFoldersDeleteCall struct { + s *Service + bucket string + managedFolder string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Delete: Permanently deletes a managed folder. +// +// - bucket: Name of the bucket containing the managed folder. +// - managedFolder: The managed folder name/path. +func (r *ManagedFoldersService) Delete(bucket string, managedFolder string) *ManagedFoldersDeleteCall { + c := &ManagedFoldersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.managedFolder = managedFolder + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": If set, only deletes the managed folder if +// its metageneration matches this value. +func (c *ManagedFoldersDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ManagedFoldersDeleteCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": If set, only deletes the managed folder +// if its metageneration does not match this value. +func (c *ManagedFoldersDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ManagedFoldersDeleteCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ManagedFoldersDeleteCall) Fields(s ...googleapi.Field) *ManagedFoldersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedFoldersDeleteCall) Context(ctx context.Context) *ManagedFoldersDeleteCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ManagedFoldersDeleteCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedFoldersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("DELETE", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "managedFolder": c.managedFolder, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.managedFolders.delete" call. +func (c *ManagedFoldersDeleteCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Permanently deletes a managed folder.", + // "httpMethod": "DELETE", + // "id": "storage.managedFolders.delete", + // "parameterOrder": [ + // "bucket", + // "managedFolder" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket containing the managed folder.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "If set, only deletes the managed folder if its metageneration matches this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "If set, only deletes the managed folder if its metageneration does not match this value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "managedFolder": { + // "description": "The managed folder name/path.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/managedFolders/{managedFolder}", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.managedFolders.get": + +type ManagedFoldersGetCall struct { + s *Service + bucket string + managedFolder string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Returns metadata of the specified managed folder. 
+// +// - bucket: Name of the bucket containing the managed folder. +// - managedFolder: The managed folder name/path. +func (r *ManagedFoldersService) Get(bucket string, managedFolder string) *ManagedFoldersGetCall { + c := &ManagedFoldersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.managedFolder = managedFolder + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the return of the managed folder +// metadata conditional on whether the managed folder's current +// metageneration matches the given value. +func (c *ManagedFoldersGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ManagedFoldersGetCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the return of the managed folder +// metadata conditional on whether the managed folder's current +// metageneration does not match the given value. +func (c *ManagedFoldersGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ManagedFoldersGetCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedFoldersGetCall) Fields(s ...googleapi.Field) *ManagedFoldersGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ManagedFoldersGetCall) IfNoneMatch(entityTag string) *ManagedFoldersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedFoldersGetCall) Context(ctx context.Context) *ManagedFoldersGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ManagedFoldersGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedFoldersGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "managedFolder": c.managedFolder, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.managedFolders.get" call. 
+// Exactly one of *ManagedFolder or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ManagedFolder.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ManagedFoldersGetCall) Do(opts ...googleapi.CallOption) (*ManagedFolder, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ManagedFolder{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns metadata of the specified managed folder.", + // "httpMethod": "GET", + // "id": "storage.managedFolders.get", + // "parameterOrder": [ + // "bucket", + // "managedFolder" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket containing the managed folder.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the return of the managed folder metadata conditional on whether the managed folder's current metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the return of the managed folder metadata conditional on whether the managed folder's current metageneration does not match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "managedFolder": { + // "description": "The managed folder name/path.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/managedFolders/{managedFolder}", + // "response": { + // "$ref": "ManagedFolder" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.managedFolders.getIamPolicy": + +type ManagedFoldersGetIamPolicyCall struct { + s *Service + bucket string + managedFolder string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// GetIamPolicy: Returns an IAM policy for the specified managed folder. +// +// - bucket: Name of the bucket containing the managed folder. +// - managedFolder: The managed folder name/path. 
+func (r *ManagedFoldersService) GetIamPolicy(bucket string, managedFolder string) *ManagedFoldersGetIamPolicyCall { + c := &ManagedFoldersGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.managedFolder = managedFolder + return c +} + +// OptionsRequestedPolicyVersion sets the optional parameter +// "optionsRequestedPolicyVersion": The IAM policy format version to be +// returned. If the optionsRequestedPolicyVersion is for an older +// version that doesn't support part of the requested IAM policy, the +// request fails. +func (c *ManagedFoldersGetIamPolicyCall) OptionsRequestedPolicyVersion(optionsRequestedPolicyVersion int64) *ManagedFoldersGetIamPolicyCall { + c.urlParams_.Set("optionsRequestedPolicyVersion", fmt.Sprint(optionsRequestedPolicyVersion)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ManagedFoldersGetIamPolicyCall) UserProject(userProject string) *ManagedFoldersGetIamPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedFoldersGetIamPolicyCall) Fields(s ...googleapi.Field) *ManagedFoldersGetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ManagedFoldersGetIamPolicyCall) IfNoneMatch(entityTag string) *ManagedFoldersGetIamPolicyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedFoldersGetIamPolicyCall) Context(ctx context.Context) *ManagedFoldersGetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ManagedFoldersGetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedFoldersGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}/iam") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "managedFolder": c.managedFolder, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.managedFolders.getIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. 
Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ManagedFoldersGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns an IAM policy for the specified managed folder.", + // "httpMethod": "GET", + // "id": "storage.managedFolders.getIamPolicy", + // "parameterOrder": [ + // "bucket", + // "managedFolder" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket containing the managed folder.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "managedFolder": { + // "description": "The managed folder name/path.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "optionsRequestedPolicyVersion": { + // "description": "The IAM policy format version to be returned. If the optionsRequestedPolicyVersion is for an older version that doesn't support part of the requested IAM policy, the request fails.", + // "format": "int32", + // "location": "query", + // "minimum": "1", + // "type": "integer" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/managedFolders/{managedFolder}/iam", + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.managedFolders.insert": + +type ManagedFoldersInsertCall struct { + s *Service + bucket string + managedfolder *ManagedFolder + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Insert: Creates a new managed folder. +// +// - bucket: Name of the bucket containing the managed folder. +func (r *ManagedFoldersService) Insert(bucket string, managedfolder *ManagedFolder) *ManagedFoldersInsertCall { + c := &ManagedFoldersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.managedfolder = managedfolder + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
+func (c *ManagedFoldersInsertCall) Fields(s ...googleapi.Field) *ManagedFoldersInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedFoldersInsertCall) Context(ctx context.Context) *ManagedFoldersInsertCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ManagedFoldersInsertCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedFoldersInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.managedfolder) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.managedFolders.insert" call. +// Exactly one of *ManagedFolder or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ManagedFolder.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ManagedFoldersInsertCall) Do(opts ...googleapi.CallOption) (*ManagedFolder, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ManagedFolder{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a new managed folder.", + // "httpMethod": "POST", + // "id": "storage.managedFolders.insert", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket containing the managed folder.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/managedFolders", + // "request": { + // "$ref": "ManagedFolder" + // }, + // "response": { + // "$ref": "ManagedFolder" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.managedFolders.list": + +type ManagedFoldersListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists managed folders in the given bucket. +// +// - bucket: Name of the bucket containing the managed folder. +func (r *ManagedFoldersService) List(bucket string) *ManagedFoldersListCall { + c := &ManagedFoldersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of +// items return in a single page of responses. +func (c *ManagedFoldersListCall) PageSize(pageSize int64) *ManagedFoldersListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *ManagedFoldersListCall) PageToken(pageToken string) *ManagedFoldersListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Prefix sets the optional parameter "prefix": The managed folder +// name/path prefix to filter the output list of results. +func (c *ManagedFoldersListCall) Prefix(prefix string) *ManagedFoldersListCall { + c.urlParams_.Set("prefix", prefix) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedFoldersListCall) Fields(s ...googleapi.Field) *ManagedFoldersListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *ManagedFoldersListCall) IfNoneMatch(entityTag string) *ManagedFoldersListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedFoldersListCall) Context(ctx context.Context) *ManagedFoldersListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ManagedFoldersListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedFoldersListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.managedFolders.list" call. +// Exactly one of *ManagedFolders or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *ManagedFolders.ServerResponse.Header or (if a response was returned +// at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ManagedFoldersListCall) Do(opts ...googleapi.CallOption) (*ManagedFolders, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &ManagedFolders{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists managed folders in the given bucket.", + // "httpMethod": "GET", + // "id": "storage.managedFolders.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket containing the managed folder.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "pageSize": { + // "description": "Maximum number of items return in a single page of responses.", + // "format": "int32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // }, + // "prefix": { + // "description": "The managed folder name/path prefix to filter the output list of results.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/managedFolders", + // "response": { + // "$ref": "ManagedFolders" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *ManagedFoldersListCall) Pages(ctx context.Context, f func(*ManagedFolders) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "storage.managedFolders.setIamPolicy": + +type ManagedFoldersSetIamPolicyCall struct { + s *Service + bucket string + managedFolder string + policy *Policy + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// SetIamPolicy: Updates an IAM policy for the specified managed folder. +// +// - bucket: Name of the bucket containing the managed folder. +// - managedFolder: The managed folder name/path. +func (r *ManagedFoldersService) SetIamPolicy(bucket string, managedFolder string, policy *Policy) *ManagedFoldersSetIamPolicyCall { + c := &ManagedFoldersSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.managedFolder = managedFolder + c.policy = policy + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. 
+func (c *ManagedFoldersSetIamPolicyCall) UserProject(userProject string) *ManagedFoldersSetIamPolicyCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedFoldersSetIamPolicyCall) Fields(s ...googleapi.Field) *ManagedFoldersSetIamPolicyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedFoldersSetIamPolicyCall) Context(ctx context.Context) *ManagedFoldersSetIamPolicyCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ManagedFoldersSetIamPolicyCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedFoldersSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.policy) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}/iam") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("PUT", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "managedFolder": c.managedFolder, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.managedFolders.setIamPolicy" call. +// Exactly one of *Policy or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Policy.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ManagedFoldersSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Policy{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates an IAM policy for the specified managed folder.", + // "httpMethod": "PUT", + // "id": "storage.managedFolders.setIamPolicy", + // "parameterOrder": [ + // "bucket", + // "managedFolder" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket containing the managed folder.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "managedFolder": { + // "description": "The managed folder name/path.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/managedFolders/{managedFolder}/iam", + // "request": { + // "$ref": "Policy" + // }, + // "response": { + // "$ref": "Policy" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + +// method id "storage.managedFolders.testIamPermissions": + +type ManagedFoldersTestIamPermissionsCall struct { + s *Service + bucket string + managedFolder string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// TestIamPermissions: Tests a set of permissions on the given managed +// folder to see which, if any, are held by the caller. +// +// - bucket: Name of the bucket containing the managed folder. +// - managedFolder: The managed folder name/path. +// - permissions: Permissions to test. +func (r *ManagedFoldersService) TestIamPermissions(bucket string, managedFolder string, permissions []string) *ManagedFoldersTestIamPermissionsCall { + c := &ManagedFoldersTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.managedFolder = managedFolder + c.urlParams_.SetMulti("permissions", append([]string{}, permissions...)) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ManagedFoldersTestIamPermissionsCall) UserProject(userProject string) *ManagedFoldersTestIamPermissionsCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ManagedFoldersTestIamPermissionsCall) Fields(s ...googleapi.Field) *ManagedFoldersTestIamPermissionsCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. 
This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *ManagedFoldersTestIamPermissionsCall) IfNoneMatch(entityTag string) *ManagedFoldersTestIamPermissionsCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ManagedFoldersTestIamPermissionsCall) Context(ctx context.Context) *ManagedFoldersTestIamPermissionsCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ManagedFoldersTestIamPermissionsCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ManagedFoldersTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/managedFolders/{managedFolder}/iam/testPermissions") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "managedFolder": c.managedFolder, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.managedFolders.testIamPermissions" call. +// Exactly one of *TestIamPermissionsResponse or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *TestIamPermissionsResponse.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ManagedFoldersTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &TestIamPermissionsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Tests a set of permissions on the given managed folder to see which, if any, are held by the caller.", + // "httpMethod": "GET", + // "id": "storage.managedFolders.testIamPermissions", + // "parameterOrder": [ + // "bucket", + // "managedFolder", + // "permissions" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket containing the managed folder.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "managedFolder": { + // "description": "The managed folder name/path.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "permissions": { + // "description": "Permissions to test.", + // "location": "query", + // "repeated": true, + // "required": true, + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/managedFolders/{managedFolder}/iam/testPermissions", + // "response": { + // "$ref": "TestIamPermissionsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + // method id "storage.notifications.delete": type NotificationsDeleteCall struct { @@ -8344,6 +10047,149 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje } +// method id "storage.objects.bulkRestore": + +type ObjectsBulkRestoreCall struct { + s *Service + bucket string + bulkrestoreobjectsrequest *BulkRestoreObjectsRequest + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// BulkRestore: Initiates a long-running bulk restore operation on the +// specified bucket. +// +// - bucket: Name of the bucket in which the object resides. +func (r *ObjectsService) BulkRestore(bucket string, bulkrestoreobjectsrequest *BulkRestoreObjectsRequest) *ObjectsBulkRestoreCall { + c := &ObjectsBulkRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.bulkrestoreobjectsrequest = bulkrestoreobjectsrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsBulkRestoreCall) Fields(s ...googleapi.Field) *ObjectsBulkRestoreCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsBulkRestoreCall) Context(ctx context.Context) *ObjectsBulkRestoreCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsBulkRestoreCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsBulkRestoreCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.bulkrestoreobjectsrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/bulkRestore") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.bulkRestore" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *ObjectsBulkRestoreCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Initiates a long-running bulk restore operation on the specified bucket.", + // "httpMethod": "POST", + // "id": "storage.objects.bulkRestore", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/bulkRestore", + // "request": { + // "$ref": "BulkRestoreObjectsRequest" + // }, + // "response": { + // "$ref": "GoogleLongrunningOperation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + // method id "storage.objects.compose": type ObjectsComposeCall struct { @@ -9327,6 +11173,14 @@ func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { return c } +// SoftDeleted sets the optional parameter "softDeleted": If true, only +// soft-deleted object versions will be listed. The default is false. +// For more information, see Soft Delete. +func (c *ObjectsGetCall) SoftDeleted(softDeleted bool) *ObjectsGetCall { + c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) + return c +} + // UserProject sets the optional parameter "userProject": The project to // be billed for this request. Required for Requester Pays buckets. func (c *ObjectsGetCall) UserProject(userProject string) *ObjectsGetCall { @@ -9513,6 +11367,11 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "location": "query", // "type": "string" // }, + // "softDeleted": { + // "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see Soft Delete.", + // "location": "query", + // "type": "boolean" + // }, // "userProject": { // "description": "The project to be billed for this request. Required for Requester Pays buckets.", // "location": "query", @@ -10221,6 +12080,15 @@ func (c *ObjectsListCall) EndOffset(endOffset string) *ObjectsListCall { return c } +// IncludeFoldersAsPrefixes sets the optional parameter +// "includeFoldersAsPrefixes": Only applicable if delimiter is set to +// '/'. If true, will also include folders and managed folders (besides +// objects) in the returned prefixes. 
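A hedged sketch of how the new object-listing options in this hunk combine: IncludeFoldersAsPrefixes (documented above, defined just below) together with a '/' delimiter. The Delimiter setter and the Prefixes field on Objects predate this change and are not shown here, so treat them as assumptions; setup and imports as in the first sketch.

func listTopLevelEntries(ctx context.Context, svc *storage.Service, bucket string) error {
	res, err := svc.Objects.List(bucket).
		Delimiter("/").                 // assumed pre-existing setter; required for folders-as-prefixes
		IncludeFoldersAsPrefixes(true). // also surface folders and managed folders as prefixes
		Context(ctx).
		Do()
	if err != nil {
		return err
	}
	for _, p := range res.Prefixes { // Prefixes field assumed
		fmt.Println(p)
	}
	return nil
}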
+func (c *ObjectsListCall) IncludeFoldersAsPrefixes(includeFoldersAsPrefixes bool) *ObjectsListCall { + c.urlParams_.Set("includeFoldersAsPrefixes", fmt.Sprint(includeFoldersAsPrefixes)) + return c +} + // IncludeTrailingDelimiter sets the optional parameter // "includeTrailingDelimiter": If true, objects that end in exactly one // instance of delimiter will have their metadata included in items in @@ -10274,6 +12142,14 @@ func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { return c } +// SoftDeleted sets the optional parameter "softDeleted": If true, only +// soft-deleted object versions will be listed. The default is false. +// For more information, see Soft Delete. +func (c *ObjectsListCall) SoftDeleted(softDeleted bool) *ObjectsListCall { + c.urlParams_.Set("softDeleted", fmt.Sprint(softDeleted)) + return c +} + // StartOffset sets the optional parameter "startOffset": Filter results // to objects whose names are lexicographically equal to or after // startOffset. If endOffset is also set, the objects listed will have @@ -10420,6 +12296,11 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { // "location": "query", // "type": "string" // }, + // "includeFoldersAsPrefixes": { + // "description": "Only applicable if delimiter is set to '/'. If true, will also include folders and managed folders (besides objects) in the returned prefixes.", + // "location": "query", + // "type": "boolean" + // }, // "includeTrailingDelimiter": { // "description": "If true, objects that end in exactly one instance of delimiter will have their metadata included in items in addition to prefixes.", // "location": "query", @@ -10461,6 +12342,11 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { // "location": "query", // "type": "string" // }, + // "softDeleted": { + // "description": "If true, only soft-deleted object versions will be listed. The default is false. For more information, see Soft Delete.", + // "location": "query", + // "type": "boolean" + // }, // "startOffset": { // "description": "Filter results to objects whose names are lexicographically equal to or after startOffset. If endOffset is also set, the objects listed will have names between startOffset (inclusive) and endOffset (exclusive).", // "location": "query", @@ -10584,6 +12470,15 @@ func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int return c } +// OverrideUnlockedRetention sets the optional parameter +// "overrideUnlockedRetention": Must be true to remove the retention +// configuration, reduce its unlocked retention period, or change its +// mode from unlocked to locked. +func (c *ObjectsPatchCall) OverrideUnlockedRetention(overrideUnlockedRetention bool) *ObjectsPatchCall { + c.urlParams_.Set("overrideUnlockedRetention", fmt.Sprint(overrideUnlockedRetention)) + return c +} + // PredefinedAcl sets the optional parameter "predefinedAcl": Apply a // predefined set of access controls to this object. 
// @@ -10775,6 +12670,11 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "required": true, // "type": "string" // }, + // "overrideUnlockedRetention": { + // "description": "Must be true to remove the retention configuration, reduce its unlocked retention period, or change its mode from unlocked to locked.", + // "location": "query", + // "type": "boolean" + // }, // "predefinedAcl": { // "description": "Apply a predefined set of access controls to this object.", // "enum": [ @@ -10830,6 +12730,277 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { } +// method id "storage.objects.restore": + +type ObjectsRestoreCall struct { + s *Service + bucket string + object string + object2 *Object + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Restore: Restores a soft-deleted object. +// +// - bucket: Name of the bucket in which the object resides. +// - generation: Selects a specific revision of this object. +// - object: Name of the object. For information about how to URL encode +// object names to be path safe, see Encoding URI Path Parts. +func (r *ObjectsService) Restore(bucket string, object string, object2 *Object) *ObjectsRestoreCall { + c := &ObjectsRestoreCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.object = object + c.object2 = object2 + return c +} + +// CopySourceAcl sets the optional parameter "copySourceAcl": If true, +// copies the source object's ACL; otherwise, uses the bucket's default +// object ACL. The default is false. +func (c *ObjectsRestoreCall) CopySourceAcl(copySourceAcl bool) *ObjectsRestoreCall { + c.urlParams_.Set("copySourceAcl", fmt.Sprint(copySourceAcl)) + return c +} + +// IfGenerationMatch sets the optional parameter "ifGenerationMatch": +// Makes the operation conditional on whether the object's one live +// generation matches the given value. Setting to 0 makes the operation +// succeed only if there are no live versions of the object. +func (c *ObjectsRestoreCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRestoreCall { + c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch)) + return c +} + +// IfGenerationNotMatch sets the optional parameter +// "ifGenerationNotMatch": Makes the operation conditional on whether +// none of the object's live generations match the given value. If no +// live object exists, the precondition fails. Setting to 0 makes the +// operation succeed only if there is a live version of the object. +func (c *ObjectsRestoreCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRestoreCall { + c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch)) + return c +} + +// IfMetagenerationMatch sets the optional parameter +// "ifMetagenerationMatch": Makes the operation conditional on whether +// the object's one live metageneration matches the given value. +func (c *ObjectsRestoreCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRestoreCall { + c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch)) + return c +} + +// IfMetagenerationNotMatch sets the optional parameter +// "ifMetagenerationNotMatch": Makes the operation conditional on +// whether none of the object's live metagenerations match the given +// value. 
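A hedged sketch of the new storage.objects.restore call introduced above (its Do method follows below). The discovery metadata marks "generation" as required, but the corresponding setter is outside this hunk, so the Generation call here is an assumption; setup and imports as in the first sketch.

func restoreSoftDeleted(ctx context.Context, svc *storage.Service, bucket, name string, gen int64) (*storage.Object, error) {
	return svc.Objects.Restore(bucket, name, &storage.Object{}).
		Generation(gen).     // assumed setter for the required "generation" query parameter
		CopySourceAcl(true). // keep the soft-deleted generation's ACL rather than the bucket default
		Context(ctx).
		Do()
}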
+func (c *ObjectsRestoreCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRestoreCall { + c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch)) + return c +} + +// Projection sets the optional parameter "projection": Set of +// properties to return. Defaults to full. +// +// Possible values: +// +// "full" - Include all properties. +// "noAcl" - Omit the owner, acl property. +func (c *ObjectsRestoreCall) Projection(projection string) *ObjectsRestoreCall { + c.urlParams_.Set("projection", projection) + return c +} + +// UserProject sets the optional parameter "userProject": The project to +// be billed for this request. Required for Requester Pays buckets. +func (c *ObjectsRestoreCall) UserProject(userProject string) *ObjectsRestoreCall { + c.urlParams_.Set("userProject", userProject) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ObjectsRestoreCall) Fields(s ...googleapi.Field) *ObjectsRestoreCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ObjectsRestoreCall) Context(ctx context.Context) *ObjectsRestoreCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *ObjectsRestoreCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *ObjectsRestoreCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/restore") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "object": c.object, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.objects.restore" call. +// Exactly one of *Object or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Object.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ObjectsRestoreCall) Do(opts ...googleapi.CallOption) (*Object, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &Object{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Restores a soft-deleted object.", + // "httpMethod": "POST", + // "id": "storage.objects.restore", + // "parameterOrder": [ + // "bucket", + // "object" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which the object resides.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "copySourceAcl": { + // "description": "If true, copies the source object's ACL; otherwise, uses the bucket's default object ACL. The default is false.", + // "location": "query", + // "type": "boolean" + // }, + // "generation": { + // "description": "Selects a specific revision of this object.", + // "format": "int64", + // "location": "query", + // "required": true, + // "type": "string" + // }, + // "ifGenerationMatch": { + // "description": "Makes the operation conditional on whether the object's one live generation matches the given value. Setting to 0 makes the operation succeed only if there are no live versions of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifGenerationNotMatch": { + // "description": "Makes the operation conditional on whether none of the object's live generations match the given value. If no live object exists, the precondition fails. Setting to 0 makes the operation succeed only if there is a live version of the object.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationMatch": { + // "description": "Makes the operation conditional on whether the object's one live metageneration matches the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "ifMetagenerationNotMatch": { + // "description": "Makes the operation conditional on whether none of the object's live metagenerations match the given value.", + // "format": "int64", + // "location": "query", + // "type": "string" + // }, + // "object": { + // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "projection": { + // "description": "Set of properties to return. Defaults to full.", + // "enum": [ + // "full", + // "noAcl" + // ], + // "enumDescriptions": [ + // "Include all properties.", + // "Omit the owner, acl property." + // ], + // "location": "query", + // "type": "string" + // }, + // "userProject": { + // "description": "The project to be billed for this request. 
Required for Requester Pays buckets.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/o/{object}/restore", + // "request": { + // "$ref": "Object" + // }, + // "response": { + // "$ref": "Object" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control" + // ] + // } + +} + // method id "storage.objects.rewrite": type ObjectsRewriteCall struct { @@ -11743,6 +13914,15 @@ func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch in return c } +// OverrideUnlockedRetention sets the optional parameter +// "overrideUnlockedRetention": Must be true to remove the retention +// configuration, reduce its unlocked retention period, or change its +// mode from unlocked to locked. +func (c *ObjectsUpdateCall) OverrideUnlockedRetention(overrideUnlockedRetention bool) *ObjectsUpdateCall { + c.urlParams_.Set("overrideUnlockedRetention", fmt.Sprint(overrideUnlockedRetention)) + return c +} + // PredefinedAcl sets the optional parameter "predefinedAcl": Apply a // predefined set of access controls to this object. // @@ -11934,6 +14114,11 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { // "required": true, // "type": "string" // }, + // "overrideUnlockedRetention": { + // "description": "Must be true to remove the retention configuration, reduce its unlocked retention period, or change its mode from unlocked to locked.", + // "location": "query", + // "type": "boolean" + // }, // "predefinedAcl": { // "description": "Apply a predefined set of access controls to this object.", // "enum": [ @@ -12286,6 +14471,495 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) } +// method id "storage.buckets.operations.cancel": + +type OperationsCancelCall struct { + s *Service + bucket string + operationId string + urlParams_ gensupport.URLParams + ctx_ context.Context + header_ http.Header +} + +// Cancel: Starts asynchronous cancellation on a long-running operation. +// The server makes a best effort to cancel the operation, but success +// is not guaranteed. +// +// - bucket: The parent bucket of the operation resource. +// - operationId: The ID of the operation resource. +func (r *OperationsService) Cancel(bucket string, operationId string) *OperationsCancelCall { + c := &OperationsCancelCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.operationId = operationId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OperationsCancelCall) Fields(s ...googleapi.Field) *OperationsCancelCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OperationsCancelCall) Context(ctx context.Context) *OperationsCancelCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. 
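A hedged sketch tying the new storage.objects.bulkRestore call (earlier in this hunk) to the bucket operations resource added here: start the long-running restore, then poll it with Operations.Get (defined below). The svc.Operations field and the Name/Done fields on GoogleLongrunningOperation follow the standard long-running-operation shape but are not shown in this hunk, so they are assumptions; setup and imports as in the first sketch, plus "time".

func bulkRestoreAndWait(ctx context.Context, svc *storage.Service, bucket string) error {
	// Request fields (e.g. which objects to restore) are omitted for brevity.
	op, err := svc.Objects.BulkRestore(bucket, &storage.BulkRestoreObjectsRequest{}).Context(ctx).Do()
	if err != nil {
		return err
	}
	for !op.Done { // Done field assumed
		time.Sleep(2 * time.Second)
		op, err = svc.Operations.Get(bucket, op.Name).Context(ctx).Do() // Name field assumed
		if err != nil {
			return err
		}
	}
	return nil
}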
+func (c *OperationsCancelCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OperationsCancelCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations/{operationId}/cancel") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("POST", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "operationId": c.operationId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.operations.cancel" call. +func (c *OperationsCancelCall) Do(opts ...googleapi.CallOption) error { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if err != nil { + return err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return gensupport.WrapError(err) + } + return nil + // { + // "description": "Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed.", + // "httpMethod": "POST", + // "id": "storage.buckets.operations.cancel", + // "parameterOrder": [ + // "bucket", + // "operationId" + // ], + // "parameters": { + // "bucket": { + // "description": "The parent bucket of the operation resource.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "operationId": { + // "description": "The ID of the operation resource.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/operations/{operationId}/cancel", + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.operations.get": + +type OperationsGetCall struct { + s *Service + bucket string + operationId string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// Get: Gets the latest state of a long-running operation. +// +// - bucket: The parent bucket of the operation resource. +// - operationId: The ID of the operation resource. +func (r *OperationsService) Get(bucket string, operationId string) *OperationsGetCall { + c := &OperationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + c.operationId = operationId + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *OperationsGetCall) Fields(s ...googleapi.Field) *OperationsGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. 
Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OperationsGetCall) IfNoneMatch(entityTag string) *OperationsGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OperationsGetCall) Context(ctx context.Context) *OperationsGetCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OperationsGetCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations/{operationId}") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + "operationId": c.operationId, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.operations.get" call. +// Exactly one of *GoogleLongrunningOperation or error will be non-nil. +// Any non-2xx status code is an error. Response headers are in either +// *GoogleLongrunningOperation.ServerResponse.Header or (if a response +// was returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *OperationsGetCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningOperation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GoogleLongrunningOperation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Gets the latest state of a long-running operation.", + // "httpMethod": "GET", + // "id": "storage.buckets.operations.get", + // "parameterOrder": [ + // "bucket", + // "operationId" + // ], + // "parameters": { + // "bucket": { + // "description": "The parent bucket of the operation resource.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "operationId": { + // "description": "The ID of the operation resource.", + // "location": "path", + // "required": true, + // "type": "string" + // } + // }, + // "path": "b/{bucket}/operations/{operationId}", + // "response": { + // "$ref": "GoogleLongrunningOperation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// method id "storage.buckets.operations.list": + +type OperationsListCall struct { + s *Service + bucket string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context + header_ http.Header +} + +// List: Lists operations that match the specified filter in the +// request. +// +// - bucket: Name of the bucket in which to look for operations. +func (r *OperationsService) List(bucket string) *OperationsListCall { + c := &OperationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.bucket = bucket + return c +} + +// Filter sets the optional parameter "filter": A filter to narrow down +// results to a preferred subset. The filtering language is documented +// in more detail in AIP-160 (https://google.aip.dev/160). +func (c *OperationsListCall) Filter(filter string) *OperationsListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// PageSize sets the optional parameter "pageSize": Maximum number of +// items to return in a single page of responses. Fewer total results +// may be returned than requested. The service uses this parameter or +// 100 items, whichever is smaller. +func (c *OperationsListCall) PageSize(pageSize int64) *OperationsListCall { + c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) + return c +} + +// PageToken sets the optional parameter "pageToken": A +// previously-returned page token representing part of the larger set of +// results to view. +func (c *OperationsListCall) PageToken(pageToken string) *OperationsListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
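A hedged sketch of the operations listing call whose setters appear around this point, driven through the Pages helper defined further below. The svc.Operations field, the Operations slice on the response, and the exact filter expression are assumptions; setup and imports as in the first sketch.

func listBucketOperations(ctx context.Context, svc *storage.Service, bucket string) error {
	return svc.Operations.List(bucket).
		Filter("done=false"). // illustrative AIP-160 filter expression
		PageSize(50).
		Pages(ctx, func(page *storage.GoogleLongrunningListOperationsResponse) error {
			for _, op := range page.Operations { // Operations field assumed
				fmt.Println(op.Name) // Name field assumed
			}
			return nil
		})
}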
+func (c *OperationsListCall) Fields(s ...googleapi.Field) *OperationsListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *OperationsListCall) IfNoneMatch(entityTag string) *OperationsListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *OperationsListCall) Context(ctx context.Context) *OperationsListCall { + c.ctx_ = ctx + return c +} + +// Header returns an http.Header that can be modified by the caller to +// add HTTP headers to the request. +func (c *OperationsListCall) Header() http.Header { + if c.header_ == nil { + c.header_ = make(http.Header) + } + return c.header_ +} + +func (c *OperationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/"+internal.Version) + for k, v := range c.header_ { + reqHeaders[k] = v + } + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + c.urlParams_.Set("prettyPrint", "false") + urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/operations") + urls += "?" + c.urlParams_.Encode() + req, err := http.NewRequest("GET", urls, body) + if err != nil { + return nil, err + } + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "bucket": c.bucket, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "storage.buckets.operations.list" call. +// Exactly one of *GoogleLongrunningListOperationsResponse or error will +// be non-nil. Any non-2xx status code is an error. Response headers are +// in either +// *GoogleLongrunningListOperationsResponse.ServerResponse.Header or (if +// a response was returned at all) in error.(*googleapi.Error).Header. +// Use googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *OperationsListCall) Do(opts ...googleapi.CallOption) (*GoogleLongrunningListOperationsResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, gensupport.WrapError(&googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + }) + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, gensupport.WrapError(err) + } + ret := &GoogleLongrunningListOperationsResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := gensupport.DecodeResponse(target, res); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Lists operations that match the specified filter in the request.", + // "httpMethod": "GET", + // "id": "storage.buckets.operations.list", + // "parameterOrder": [ + // "bucket" + // ], + // "parameters": { + // "bucket": { + // "description": "Name of the bucket in which to look for operations.", + // "location": "path", + // "required": true, + // "type": "string" + // }, + // "filter": { + // "description": "A filter to narrow down results to a preferred subset. The filtering language is documented in more detail in [AIP-160](https://google.aip.dev/160).", + // "location": "query", + // "type": "string" + // }, + // "pageSize": { + // "description": "Maximum number of items to return in a single page of responses. Fewer total results may be returned than requested. The service uses this parameter or 100 items, whichever is smaller.", + // "format": "int32", + // "location": "query", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "A previously-returned page token representing part of the larger set of results to view.", + // "location": "query", + // "type": "string" + // } + // }, + // "path": "b/{bucket}/operations", + // "response": { + // "$ref": "GoogleLongrunningListOperationsResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/cloud-platform.read-only", + // "https://www.googleapis.com/auth/devstorage.full_control", + // "https://www.googleapis.com/auth/devstorage.read_only", + // "https://www.googleapis.com/auth/devstorage.read_write" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *OperationsListCall) Pages(ctx context.Context, f func(*GoogleLongrunningListOperationsResponse) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + // method id "storage.projects.hmacKeys.create": type ProjectsHmacKeysCreateCall struct { diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index e1403e08e..e36d7589e 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -35,9 +35,6 @@ const disableDirectPath = "GOOGLE_CLOUD_DISABLE_DIRECT_PATH" // Check env to decide if using google-c2p resolver for DirectPath traffic. 
const enableDirectPathXds = "GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS" -// Set at init time by dial_appengine.go. If nil, we're not on App Engine. -var appengineDialerHook func(context.Context) grpc.DialOption - // Set at init time by dial_socketopt.go. If nil, socketopt is not supported. var timeoutDialerOption grpc.DialOption @@ -186,12 +183,6 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C } } - if appengineDialerHook != nil { - // Use the Socket API on App Engine. - // appengine dialer will override socketopt dialer - grpcOpts = append(grpcOpts, appengineDialerHook(ctx)) - } - // Add tracing, but before the other options, so that clients can override the // gRPC stats handler. // This assumes that gRPC options are processed in order, left to right. diff --git a/vendor/google.golang.org/api/transport/grpc/dial_appengine.go b/vendor/google.golang.org/api/transport/grpc/dial_appengine.go deleted file mode 100644 index fd3dc0565..000000000 --- a/vendor/google.golang.org/api/transport/grpc/dial_appengine.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2016 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build appengine -// +build appengine - -package grpc - -import ( - "context" - "net" - "time" - - "google.golang.org/appengine" - "google.golang.org/appengine/socket" - "google.golang.org/grpc" -) - -func init() { - // NOTE: dev_appserver doesn't currently support SSL. - // When it does, this code can be removed. - if appengine.IsDevAppServer() { - return - } - - appengineDialerHook = func(ctx context.Context) grpc.DialOption { - return grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { - return socket.DialTimeout(ctx, "tcp", addr, timeout) - }) - } -} diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index eca0c3ba7..a07362ffd 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -145,22 +145,13 @@ func (t *parameterTransport) RoundTrip(req *http.Request) (*http.Response, error return rt.RoundTrip(&newReq) } -// Set at init time by dial_appengine.go. If nil, we're not on App Engine. -var appengineUrlfetchHook func(context.Context) http.RoundTripper - -// defaultBaseTransport returns the base HTTP transport. -// On App Engine, this is urlfetch.Transport. -// Otherwise, use a default transport, taking most defaults from -// http.DefaultTransport. +// defaultBaseTransport returns the base HTTP transport. It uses a default +// transport, taking most defaults from http.DefaultTransport. // If TLSCertificate is available, set TLSClientConfig as well. func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper { - if appengineUrlfetchHook != nil { - return appengineUrlfetchHook(ctx) - } - // Copy http.DefaultTransport except for MaxIdleConnsPerHost setting, - // which is increased due to reported performance issues under load in the GCS - // client. Transport.Clone is only available in Go 1.13 and up. + // which is increased due to reported performance issues under load in the + // GCS client. Transport.Clone is only available in Go 1.13 and up. 
trans := clonedTransport(http.DefaultTransport) if trans == nil { trans = fallbackBaseTransport() diff --git a/vendor/google.golang.org/api/transport/http/dial_appengine.go b/vendor/google.golang.org/api/transport/http/dial_appengine.go deleted file mode 100644 index f064e133f..000000000 --- a/vendor/google.golang.org/api/transport/http/dial_appengine.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2016 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build appengine -// +build appengine - -package http - -import ( - "context" - "net/http" - - "google.golang.org/appengine/urlfetch" -) - -func init() { - appengineUrlfetchHook = func(ctx context.Context) http.RoundTripper { - return &urlfetch.Transport{Context: ctx} - } -} diff --git a/vendor/google.golang.org/appengine/.travis.yml b/vendor/google.golang.org/appengine/.travis.yml deleted file mode 100644 index 6d03f4d36..000000000 --- a/vendor/google.golang.org/appengine/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go - -go_import_path: google.golang.org/appengine - -install: - - ./travis_install.sh - -script: - - ./travis_test.sh - -matrix: - include: - - go: 1.9.x - env: GOAPP=true - - go: 1.10.x - env: GOAPP=false - - go: 1.11.x - env: GO111MODULE=on diff --git a/vendor/google.golang.org/appengine/CONTRIBUTING.md b/vendor/google.golang.org/appengine/CONTRIBUTING.md index ffc298520..289693613 100644 --- a/vendor/google.golang.org/appengine/CONTRIBUTING.md +++ b/vendor/google.golang.org/appengine/CONTRIBUTING.md @@ -19,14 +19,12 @@ ## Running system tests -Download and install the [Go App Engine SDK](https://cloud.google.com/appengine/docs/go/download). Make sure the `go_appengine` dir is in your `PATH`. - Set the `APPENGINE_DEV_APPSERVER` environment variable to `/path/to/go_appengine/dev_appserver.py`. -Run tests with `goapp test`: +Run tests with `go test`: ``` -goapp test -v google.golang.org/appengine/... +go test -v google.golang.org/appengine/... ``` ## Contributor License Agreements diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md index 9fdbacd3c..5ccddd999 100644 --- a/vendor/google.golang.org/appengine/README.md +++ b/vendor/google.golang.org/appengine/README.md @@ -1,6 +1,6 @@ # Go App Engine packages -[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine) +[![CI Status](https://github.com/golang/appengine/actions/workflows/ci.yml/badge.svg)](https://github.com/golang/appengine/actions/workflows/ci.yml) This repository supports the Go runtime on *App Engine standard*. It provides APIs for interacting with App Engine services. @@ -51,7 +51,7 @@ code importing `appengine/datastore` will now need to import `google.golang.org/ Most App Engine services are available with exactly the same API. A few APIs were cleaned up, and there are some differences: -* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`. +* `appengine.Context` has been replaced with the `Context` type from `context`. * Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`. * `appengine.Timeout` has been removed. Use `context.WithTimeout` instead. * `appengine.Datacenter` now takes a `context.Context` argument. @@ -72,7 +72,7 @@ A few APIs were cleaned up, and there are some differences: * `appengine/socket` is not required on App Engine flexible environment / Managed VMs. 
Use the standard `net` package instead. -## Key Encode/Decode compatibiltiy to help with datastore library migrations +## Key Encode/Decode compatibility to help with datastore library migrations Key compatibility updates have been added to help customers transition from google.golang.org/appengine/datastore to cloud.google.com/go/datastore. The `EnableKeyConversion` enables automatic conversion from a key encoded with cloud.google.com/go/datastore to google.golang.org/appengine/datastore key type. diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go index 8c9697674..35ba9c896 100644 --- a/vendor/google.golang.org/appengine/appengine.go +++ b/vendor/google.golang.org/appengine/appengine.go @@ -9,10 +9,10 @@ package appengine // import "google.golang.org/appengine" import ( + "context" "net/http" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine/internal" ) @@ -35,18 +35,18 @@ import ( // // Main is designed so that the app's main package looks like this: // -// package main +// package main // -// import ( -// "google.golang.org/appengine" +// import ( +// "google.golang.org/appengine" // -// _ "myapp/package0" -// _ "myapp/package1" -// ) +// _ "myapp/package0" +// _ "myapp/package1" +// ) // -// func main() { -// appengine.Main() -// } +// func main() { +// appengine.Main() +// } // // The "myapp/packageX" packages are expected to register HTTP handlers // in their init functions. @@ -54,6 +54,9 @@ func Main() { internal.Main() } +// Middleware wraps an http handler so that it can make GAE API calls +var Middleware func(http.Handler) http.Handler = internal.Middleware + // IsDevAppServer reports whether the App Engine app is running in the // development App Server. func IsDevAppServer() bool { diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go index f4b645aad..6e1d041cd 100644 --- a/vendor/google.golang.org/appengine/appengine_vm.go +++ b/vendor/google.golang.org/appengine/appengine_vm.go @@ -2,19 +2,19 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build !appengine // +build !appengine package appengine import ( - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" + "context" ) // BackgroundContext returns a context not associated with a request. -// This should only be used when not servicing a request. -// This only works in App Engine "flexible environment". +// +// Deprecated: App Engine no longer has a special background context. +// Just use context.Background(). 
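A hedged sketch of how the newly exported appengine.Middleware hook (added in appengine.go above) might be used by an app that runs its own HTTP server instead of calling appengine.Main. Editorial illustration only, under those assumptions.

package main

import (
	"fmt"
	"log"
	"net/http"

	"google.golang.org/appengine"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	})
	// Wrap the handler so requests carry a context that can reach the App Engine APIs.
	log.Fatal(http.ListenAndServe(":8080", appengine.Middleware(mux)))
}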
func BackgroundContext() context.Context { - return internal.BackgroundContext() + return context.Background() } diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go index b8dcf8f36..1202fc1a5 100644 --- a/vendor/google.golang.org/appengine/identity.go +++ b/vendor/google.golang.org/appengine/identity.go @@ -5,10 +5,9 @@ package appengine import ( + "context" "time" - "golang.org/x/net/context" - "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/app_identity" modpb "google.golang.org/appengine/internal/modules" diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go index 721053c20..0569f5dd4 100644 --- a/vendor/google.golang.org/appengine/internal/api.go +++ b/vendor/google.golang.org/appengine/internal/api.go @@ -2,12 +2,14 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build !appengine // +build !appengine package internal import ( "bytes" + "context" "errors" "fmt" "io/ioutil" @@ -24,7 +26,6 @@ import ( "time" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" logpb "google.golang.org/appengine/internal/log" @@ -32,8 +33,7 @@ import ( ) const ( - apiPath = "/rpc_http" - defaultTicketSuffix = "/default.20150612t184001.0" + apiPath = "/rpc_http" ) var ( @@ -65,21 +65,22 @@ var ( IdleConnTimeout: 90 * time.Second, }, } - - defaultTicketOnce sync.Once - defaultTicket string - backgroundContextOnce sync.Once - backgroundContext netcontext.Context ) -func apiURL() *url.URL { +func apiURL(ctx context.Context) *url.URL { host, port := "appengine.googleapis.internal", "10001" if h := os.Getenv("API_HOST"); h != "" { host = h } + if hostOverride := ctx.Value(apiHostOverrideKey); hostOverride != nil { + host = hostOverride.(string) + } if p := os.Getenv("API_PORT"); p != "" { port = p } + if portOverride := ctx.Value(apiPortOverrideKey); portOverride != nil { + port = portOverride.(string) + } return &url.URL{ Scheme: "http", Host: host + ":" + port, @@ -87,82 +88,97 @@ func apiURL() *url.URL { } } -func handleHTTP(w http.ResponseWriter, r *http.Request) { - c := &context{ - req: r, - outHeader: w.Header(), - apiURL: apiURL(), - } - r = r.WithContext(withContext(r.Context(), c)) - c.req = r - - stopFlushing := make(chan int) - - // Patch up RemoteAddr so it looks reasonable. - if addr := r.Header.Get(userIPHeader); addr != "" { - r.RemoteAddr = addr - } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { - r.RemoteAddr = addr - } else { - // Should not normally reach here, but pick a sensible default anyway. - r.RemoteAddr = "127.0.0.1" - } - // The address in the headers will most likely be of these forms: - // 123.123.123.123 - // 2001:db8::1 - // net/http.Request.RemoteAddr is specified to be in "IP:port" form. - if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - // Assume the remote address is only a host; add a default port. - r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") - } - - // Start goroutine responsible for flushing app logs. - // This is done after adding c to ctx.m (and stopped before removing it) - // because flushing logs requires making an API call. 
- go c.logFlusher(stopFlushing) - - executeRequestSafely(c, r) - c.outHeader = nil // make sure header changes aren't respected any more - - stopFlushing <- 1 // any logging beyond this point will be dropped - - // Flush any pending logs asynchronously. - c.pendingLogs.Lock() - flushes := c.pendingLogs.flushes - if len(c.pendingLogs.lines) > 0 { - flushes++ - } - c.pendingLogs.Unlock() - flushed := make(chan struct{}) - go func() { - defer close(flushed) - // Force a log flush, because with very short requests we - // may not ever flush logs. - c.flushLog(true) - }() - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) - - // Avoid nil Write call if c.Write is never called. - if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } - // Wait for the last flush to complete before returning, - // otherwise the security ticket will not be valid. - <-flushed +// Middleware wraps an http handler so that it can make GAE API calls +func Middleware(next http.Handler) http.Handler { + return handleHTTPMiddleware(executeRequestSafelyMiddleware(next)) } -func executeRequestSafely(c *context, r *http.Request) { - defer func() { - if x := recover(); x != nil { - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 +func handleHTTPMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + c := &aeContext{ + req: r, + outHeader: w.Header(), } - }() + r = r.WithContext(withContext(r.Context(), c)) + c.req = r - http.DefaultServeMux.ServeHTTP(c, r) + stopFlushing := make(chan int) + + // Patch up RemoteAddr so it looks reasonable. + if addr := r.Header.Get(userIPHeader); addr != "" { + r.RemoteAddr = addr + } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { + r.RemoteAddr = addr + } else { + // Should not normally reach here, but pick a sensible default anyway. + r.RemoteAddr = "127.0.0.1" + } + // The address in the headers will most likely be of these forms: + // 123.123.123.123 + // 2001:db8::1 + // net/http.Request.RemoteAddr is specified to be in "IP:port" form. + if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { + // Assume the remote address is only a host; add a default port. + r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") + } + + if logToLogservice() { + // Start goroutine responsible for flushing app logs. + // This is done after adding c to ctx.m (and stopped before removing it) + // because flushing logs requires making an API call. + go c.logFlusher(stopFlushing) + } + + next.ServeHTTP(c, r) + c.outHeader = nil // make sure header changes aren't respected any more + + flushed := make(chan struct{}) + if logToLogservice() { + stopFlushing <- 1 // any logging beyond this point will be dropped + + // Flush any pending logs asynchronously. + c.pendingLogs.Lock() + flushes := c.pendingLogs.flushes + if len(c.pendingLogs.lines) > 0 { + flushes++ + } + c.pendingLogs.Unlock() + go func() { + defer close(flushed) + // Force a log flush, because with very short requests we + // may not ever flush logs. + c.flushLog(true) + }() + w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + } + + // Avoid nil Write call if c.Write is never called. + if c.outCode != 0 { + w.WriteHeader(c.outCode) + } + if c.outBody != nil { + w.Write(c.outBody) + } + if logToLogservice() { + // Wait for the last flush to complete before returning, + // otherwise the security ticket will not be valid. 
+ <-flushed + } + }) +} + +func executeRequestSafelyMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + if x := recover(); x != nil { + c := w.(*aeContext) + logf(c, 4, "%s", renderPanic(x)) // 4 == critical + c.outCode = 500 + } + }() + + next.ServeHTTP(w, r) + }) } func renderPanic(x interface{}) string { @@ -204,9 +220,9 @@ func renderPanic(x interface{}) string { return string(buf) } -// context represents the context of an in-flight HTTP request. +// aeContext represents the aeContext of an in-flight HTTP request. // It implements the appengine.Context and http.ResponseWriter interfaces. -type context struct { +type aeContext struct { req *http.Request outCode int @@ -218,8 +234,6 @@ type context struct { lines []*logpb.UserAppLogLine flushes int } - - apiURL *url.URL } var contextKey = "holds a *context" @@ -227,8 +241,8 @@ var contextKey = "holds a *context" // jointContext joins two contexts in a superficial way. // It takes values and timeouts from a base context, and only values from another context. type jointContext struct { - base netcontext.Context - valuesOnly netcontext.Context + base context.Context + valuesOnly context.Context } func (c jointContext) Deadline() (time.Time, bool) { @@ -252,94 +266,54 @@ func (c jointContext) Value(key interface{}) interface{} { // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. -func fromContext(ctx netcontext.Context) *context { - c, _ := ctx.Value(&contextKey).(*context) +func fromContext(ctx context.Context) *aeContext { + c, _ := ctx.Value(&contextKey).(*aeContext) return c } -func withContext(parent netcontext.Context, c *context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c *aeContext) context.Context { + ctx := context.WithValue(parent, &contextKey, c) if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { ctx = withNamespace(ctx, ns) } return ctx } -func toContext(c *context) netcontext.Context { - return withContext(netcontext.Background(), c) +func toContext(c *aeContext) context.Context { + return withContext(context.Background(), c) } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { return c.req.Header } return nil } -func ReqContext(req *http.Request) netcontext.Context { +func ReqContext(req *http.Request) context.Context { return req.Context() } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { return jointContext{ base: parent, valuesOnly: req.Context(), } } -// DefaultTicket returns a ticket used for background context or dev_appserver. -func DefaultTicket() string { - defaultTicketOnce.Do(func() { - if IsDevAppServer() { - defaultTicket = "testapp" + defaultTicketSuffix - return - } - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - }) - return defaultTicket -} - -func BackgroundContext() netcontext.Context { - backgroundContextOnce.Do(func() { - // Compute background security ticket. 
- ticket := DefaultTicket() - - c := &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, - }, - apiURL: apiURL(), - } - backgroundContext = toContext(c) - - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go c.logFlusher(make(chan int)) - }) - - return backgroundContext -} - // RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. It returns a closure to delete -// the registration. +// any API calls are sent to the provided URL. // It should only be used by aetest package. -func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { - c := &context{ - req: req, - apiURL: apiURL, - } - ctx := withContext(decorate(req.Context()), c) - req = req.WithContext(ctx) - c.req = req - return req, func() {} +func RegisterTestRequest(req *http.Request, apiURL *url.URL, appID string) *http.Request { + ctx := req.Context() + ctx = withAPIHostOverride(ctx, apiURL.Hostname()) + ctx = withAPIPortOverride(ctx, apiURL.Port()) + ctx = WithAppIDOverride(ctx, appID) + + // use the unregistered request as a placeholder so that withContext can read the headers + c := &aeContext{req: req} + c.req = req.WithContext(withContext(ctx, c)) + return c.req } var errTimeout = &CallError{ @@ -348,7 +322,7 @@ var errTimeout = &CallError{ Timeout: true, } -func (c *context) Header() http.Header { return c.outHeader } +func (c *aeContext) Header() http.Header { return c.outHeader } // Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status // codes do not permit a response body (nor response entity headers such as @@ -365,7 +339,7 @@ func bodyAllowedForStatus(status int) bool { return true } -func (c *context) Write(b []byte) (int, error) { +func (c *aeContext) Write(b []byte) (int, error) { if c.outCode == 0 { c.WriteHeader(http.StatusOK) } @@ -376,7 +350,7 @@ func (c *context) Write(b []byte) (int, error) { return len(b), nil } -func (c *context) WriteHeader(code int) { +func (c *aeContext) WriteHeader(code int) { if c.outCode != 0 { logf(c, 3, "WriteHeader called multiple times on request.") // error level return @@ -384,10 +358,11 @@ func (c *context) WriteHeader(code int) { c.outCode = code } -func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { +func post(ctx context.Context, body []byte, timeout time.Duration) (b []byte, err error) { + apiURL := apiURL(ctx) hreq := &http.Request{ Method: "POST", - URL: c.apiURL, + URL: apiURL, Header: http.Header{ apiEndpointHeader: apiEndpointHeaderValue, apiMethodHeader: apiMethodHeaderValue, @@ -396,13 +371,16 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) }, Body: ioutil.NopCloser(bytes.NewReader(body)), ContentLength: int64(len(body)), - Host: c.apiURL.Host, + Host: apiURL.Host, } - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) - } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) + c := fromContext(ctx) + if c != nil { + if info := c.req.Header.Get(dapperHeader); info != "" { + hreq.Header.Set(dapperHeader, info) + } + if info := c.req.Header.Get(traceHeader); info != "" { + hreq.Header.Set(traceHeader, info) + } } tr := apiHTTPClient.Transport.(*http.Transport) @@ -444,7 +422,7 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) return hrespBody, nil } -func 
Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { +func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -463,15 +441,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) } c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errNotAppEngineContext - } // Apply transaction modifications if we're in a transaction. if t := transactionFromContext(ctx); t != nil { if t.finished { - return errors.New("transaction context has expired") + return errors.New("transaction aeContext has expired") } applyTransaction(in, &t.transaction) } @@ -487,20 +461,13 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - ticket := c.req.Header.Get(ticketHeader) - // Use a test ticket under test environment. - if ticket == "" { - if appid := ctx.Value(&appIDOverrideKey); appid != nil { - ticket = appid.(string) + defaultTicketSuffix + ticket := "" + if c != nil { + ticket = c.req.Header.Get(ticketHeader) + if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { + ticket = dri } } - // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. - if ticket == "" { - ticket = DefaultTicket() - } - if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { - ticket = dri - } req := &remotepb.Request{ ServiceName: &service, Method: &method, @@ -512,7 +479,7 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - hrespBody, err := c.post(hreqBody, timeout) + hrespBody, err := post(ctx, hreqBody, timeout) if err != nil { return err } @@ -549,11 +516,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return proto.Unmarshal(res.Response, out) } -func (c *context) Request() *http.Request { +func (c *aeContext) Request() *http.Request { return c.req } -func (c *context) addLogLine(ll *logpb.UserAppLogLine) { +func (c *aeContext) addLogLine(ll *logpb.UserAppLogLine) { // Truncate long log lines. // TODO(dsymonds): Check if this is still necessary. const lim = 8 << 10 @@ -575,18 +542,20 @@ var logLevelName = map[int64]string{ 4: "CRITICAL", } -func logf(c *context, level int64, format string, args ...interface{}) { +func logf(c *aeContext, level int64, format string, args ...interface{}) { if c == nil { - panic("not an App Engine context") + panic("not an App Engine aeContext") } s := fmt.Sprintf(format, args...) s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - // Only duplicate log to stderr if not running on App Engine second generation + if logToLogservice() { + c.addLogLine(&logpb.UserAppLogLine{ + TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), + Level: &level, + Message: &s, + }) + } + // Log to stdout if not deployed if !IsSecondGen() { log.Print(logLevelName[level] + ": " + s) } @@ -594,7 +563,7 @@ func logf(c *context, level int64, format string, args ...interface{}) { // flushLog attempts to flush any pending logs to the appserver. // It should not be called concurrently. 
-func (c *context) flushLog(force bool) (flushed bool) { +func (c *aeContext) flushLog(force bool) (flushed bool) { c.pendingLogs.Lock() // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. n, rem := 0, 30<<20 @@ -655,7 +624,7 @@ const ( forceFlushInterval = 60 * time.Second ) -func (c *context) logFlusher(stop <-chan int) { +func (c *aeContext) logFlusher(stop <-chan int) { lastFlush := time.Now() tick := time.NewTicker(flushInterval) for { @@ -673,6 +642,12 @@ func (c *context) logFlusher(stop <-chan int) { } } -func ContextForTesting(req *http.Request) netcontext.Context { - return toContext(&context{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return toContext(&aeContext{req: req}) +} + +func logToLogservice() bool { + // TODO: replace logservice with json structured logs to $LOG_DIR/app.log.json + // where $LOG_DIR is /var/log in prod and some tmpdir in dev + return os.Getenv("LOG_TO_LOGSERVICE") != "0" } diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go index f0f40b2e3..87c33c798 100644 --- a/vendor/google.golang.org/appengine/internal/api_classic.go +++ b/vendor/google.golang.org/appengine/internal/api_classic.go @@ -2,11 +2,13 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal import ( + "context" "errors" "fmt" "net/http" @@ -17,20 +19,19 @@ import ( basepb "appengine_internal/base" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) var contextKey = "holds an appengine.Context" // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. -func fromContext(ctx netcontext.Context) appengine.Context { +func fromContext(ctx context.Context) appengine.Context { c, _ := ctx.Value(&contextKey).(appengine.Context) return c } // This is only for classic App Engine adapters. 
-func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) { +func ClassicContextFromContext(ctx context.Context) (appengine.Context, error) { c := fromContext(ctx) if c == nil { return nil, errNotAppEngineContext @@ -38,8 +39,8 @@ func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error return c, nil } -func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c appengine.Context) context.Context { + ctx := context.WithValue(parent, &contextKey, c) s := &basepb.StringProto{} c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) @@ -50,7 +51,7 @@ func withContext(parent netcontext.Context, c appengine.Context) netcontext.Cont return ctx } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { if req, ok := c.Request().(*http.Request); ok { return req.Header @@ -59,11 +60,11 @@ func IncomingHeaders(ctx netcontext.Context) http.Header { return nil } -func ReqContext(req *http.Request) netcontext.Context { - return WithContext(netcontext.Background(), req) +func ReqContext(req *http.Request) context.Context { + return WithContext(context.Background(), req) } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { c := appengine.NewContext(req) return withContext(parent, c) } @@ -83,11 +84,11 @@ func (t *testingContext) Call(service, method string, _, _ appengine_internal.Pr } func (t *testingContext) Request() interface{} { return t.req } -func ContextForTesting(req *http.Request) netcontext.Context { - return withContext(netcontext.Background(), &testingContext{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return withContext(context.Background(), &testingContext{req: req}) } -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { +func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -144,8 +145,8 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } -func handleHTTP(w http.ResponseWriter, r *http.Request) { - panic("handleHTTP called; this should be impossible") +func Middleware(next http.Handler) http.Handler { + panic("Middleware called; this should be impossible") } func logf(c appengine.Context, level int64, format string, args ...interface{}) { diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go index e0c0b214b..5b95c13d9 100644 --- a/vendor/google.golang.org/appengine/internal/api_common.go +++ b/vendor/google.golang.org/appengine/internal/api_common.go @@ -5,20 +5,26 @@ package internal import ( + "context" "errors" "os" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) +type ctxKey string + +func (c ctxKey) String() string { + return "appengine context key: " + string(c) +} + var errNotAppEngineContext = errors.New("not an App Engine context") -type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error +type CallOverrideFunc func(ctx context.Context, service, method string, in, out proto.Message) error var 
callOverrideKey = "holds []CallOverrideFunc" -func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { +func WithCallOverride(ctx context.Context, f CallOverrideFunc) context.Context { // We avoid appending to any existing call override // so we don't risk overwriting a popped stack below. var cofs []CallOverrideFunc @@ -26,10 +32,10 @@ func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Con cofs = append(cofs, uf...) } cofs = append(cofs, f) - return netcontext.WithValue(ctx, &callOverrideKey, cofs) + return context.WithValue(ctx, &callOverrideKey, cofs) } -func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { +func callOverrideFromContext(ctx context.Context) (CallOverrideFunc, context.Context, bool) { cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) if len(cofs) == 0 { return nil, nil, false @@ -37,7 +43,7 @@ func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netconte // We found a list of overrides; grab the last, and reconstitute a // context that will hide it. f := cofs[len(cofs)-1] - ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) + ctx = context.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) return f, ctx, true } @@ -45,23 +51,35 @@ type logOverrideFunc func(level int64, format string, args ...interface{}) var logOverrideKey = "holds a logOverrideFunc" -func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { - return netcontext.WithValue(ctx, &logOverrideKey, f) +func WithLogOverride(ctx context.Context, f logOverrideFunc) context.Context { + return context.WithValue(ctx, &logOverrideKey, f) } var appIDOverrideKey = "holds a string, being the full app ID" -func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { - return netcontext.WithValue(ctx, &appIDOverrideKey, appID) +func WithAppIDOverride(ctx context.Context, appID string) context.Context { + return context.WithValue(ctx, &appIDOverrideKey, appID) +} + +var apiHostOverrideKey = ctxKey("holds a string, being the alternate API_HOST") + +func withAPIHostOverride(ctx context.Context, apiHost string) context.Context { + return context.WithValue(ctx, apiHostOverrideKey, apiHost) +} + +var apiPortOverrideKey = ctxKey("holds a string, being the alternate API_PORT") + +func withAPIPortOverride(ctx context.Context, apiPort string) context.Context { + return context.WithValue(ctx, apiPortOverrideKey, apiPort) } var namespaceKey = "holds the namespace string" -func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { - return netcontext.WithValue(ctx, &namespaceKey, ns) +func withNamespace(ctx context.Context, ns string) context.Context { + return context.WithValue(ctx, &namespaceKey, ns) } -func NamespaceFromContext(ctx netcontext.Context) string { +func NamespaceFromContext(ctx context.Context) string { // If there's no namespace, return the empty string. ns, _ := ctx.Value(&namespaceKey).(string) return ns @@ -70,14 +88,14 @@ func NamespaceFromContext(ctx netcontext.Context) string { // FullyQualifiedAppID returns the fully-qualified application ID. // This may contain a partition prefix (e.g. "s~" for High Replication apps), // or a domain prefix (e.g. "example.com:"). 
-func FullyQualifiedAppID(ctx netcontext.Context) string { +func FullyQualifiedAppID(ctx context.Context) string { if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { return id } return fullyQualifiedAppID(ctx) } -func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { +func Logf(ctx context.Context, level int64, format string, args ...interface{}) { if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { f(level, format, args...) return @@ -90,7 +108,7 @@ func Logf(ctx netcontext.Context, level int64, format string, args ...interface{ } // NamespacedContext wraps a Context to support namespaces. -func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { +func NamespacedContext(ctx context.Context, namespace string) context.Context { return withNamespace(ctx, namespace) } diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go index 9b4134e42..0f95aa91d 100644 --- a/vendor/google.golang.org/appengine/internal/identity.go +++ b/vendor/google.golang.org/appengine/internal/identity.go @@ -5,9 +5,8 @@ package internal import ( + "context" "os" - - netcontext "golang.org/x/net/context" ) var ( @@ -23,7 +22,7 @@ var ( // AppID is the implementation of the wrapper function of the same name in // ../identity.go. See that file for commentary. -func AppID(c netcontext.Context) string { +func AppID(c context.Context) string { return appID(FullyQualifiedAppID(c)) } @@ -35,7 +34,7 @@ func IsStandard() bool { return appengineStandard || IsSecondGen() } -// IsStandard is the implementation of the wrapper function of the same name in +// IsSecondGen is the implementation of the wrapper function of the same name in // ../appengine.go. See that file for commentary. func IsSecondGen() bool { // Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime. diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go index 4e979f45e..5ad3548bf 100644 --- a/vendor/google.golang.org/appengine/internal/identity_classic.go +++ b/vendor/google.golang.org/appengine/internal/identity_classic.go @@ -2,21 +2,22 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build appengine // +build appengine package internal import ( - "appengine" + "context" - netcontext "golang.org/x/net/context" + "appengine" ) func init() { appengineStandard = true } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -24,12 +25,12 @@ func DefaultVersionHostname(ctx netcontext.Context) string { return appengine.DefaultVersionHostname(c) } -func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } -func ServerSoftware() string { return appengine.ServerSoftware() } -func InstanceID() string { return appengine.InstanceID() } -func IsDevAppServer() bool { return appengine.IsDevAppServer() } +func Datacenter(_ context.Context) string { return appengine.Datacenter() } +func ServerSoftware() string { return appengine.ServerSoftware() } +func InstanceID() string { return appengine.InstanceID() } +func IsDevAppServer() bool { return appengine.IsDevAppServer() } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -37,14 +38,14 @@ func RequestID(ctx netcontext.Context) string { return appengine.RequestID(c) } -func ModuleName(ctx netcontext.Context) string { +func ModuleName(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) } return appengine.ModuleName(c) } -func VersionID(ctx netcontext.Context) string { +func VersionID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -52,7 +53,7 @@ func VersionID(ctx netcontext.Context) string { return appengine.VersionID(c) } -func fullyQualifiedAppID(ctx netcontext.Context) string { +func fullyQualifiedAppID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go index d5e2e7b5e..4201b6b58 100644 --- a/vendor/google.golang.org/appengine/internal/identity_flex.go +++ b/vendor/google.golang.org/appengine/internal/identity_flex.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appenginevm // +build appenginevm package internal diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go index 5d8067263..18ddda3a4 100644 --- a/vendor/google.golang.org/appengine/internal/identity_vm.go +++ b/vendor/google.golang.org/appengine/internal/identity_vm.go @@ -2,17 +2,17 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal import ( + "context" "log" "net/http" "os" "strings" - - netcontext "golang.org/x/net/context" ) // These functions are implementations of the wrapper functions @@ -24,7 +24,7 @@ const ( hDatacenter = "X-AppEngine-Datacenter" ) -func ctxHeaders(ctx netcontext.Context) http.Header { +func ctxHeaders(ctx context.Context) http.Header { c := fromContext(ctx) if c == nil { return nil @@ -32,15 +32,15 @@ func ctxHeaders(ctx netcontext.Context) http.Header { return c.Request().Header } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { return ctxHeaders(ctx).Get(hDefaultVersionHostname) } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { return ctxHeaders(ctx).Get(hRequestLogId) } -func Datacenter(ctx netcontext.Context) string { +func Datacenter(ctx context.Context) string { if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" { return dc } @@ -71,7 +71,7 @@ func ServerSoftware() string { // TODO(dsymonds): Remove the metadata fetches. -func ModuleName(_ netcontext.Context) string { +func ModuleName(_ context.Context) string { if s := os.Getenv("GAE_MODULE_NAME"); s != "" { return s } @@ -81,7 +81,7 @@ func ModuleName(_ netcontext.Context) string { return string(mustGetMetadata("instance/attributes/gae_backend_name")) } -func VersionID(_ netcontext.Context) string { +func VersionID(_ context.Context) string { if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { return s1 + "." + s2 } @@ -112,7 +112,7 @@ func partitionlessAppID() string { return string(mustGetMetadata("instance/attributes/gae_project")) } -func fullyQualifiedAppID(_ netcontext.Context) string { +func fullyQualifiedAppID(_ context.Context) string { if s := os.Getenv("GAE_APPLICATION"); s != "" { return s } @@ -130,5 +130,5 @@ func fullyQualifiedAppID(_ netcontext.Context) string { } func IsDevAppServer() bool { - return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" + return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" || os.Getenv("GAE_ENV") == "localdev" } diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go index 1e765312f..afd0ae84f 100644 --- a/vendor/google.golang.org/appengine/internal/main.go +++ b/vendor/google.golang.org/appengine/internal/main.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go index ddb79a333..86a8caf06 100644 --- a/vendor/google.golang.org/appengine/internal/main_vm.go +++ b/vendor/google.golang.org/appengine/internal/main_vm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal @@ -29,7 +30,7 @@ func Main() { if IsDevAppServer() { host = "127.0.0.1" } - if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil { + if err := http.ListenAndServe(host+":"+port, Middleware(http.DefaultServeMux)); err != nil { log.Fatalf("http.ListenAndServe: %v", err) } } diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go b/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go deleted file mode 100644 index 4ec872e46..000000000 --- a/vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go +++ /dev/null @@ -1,2822 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/socket/socket_service.proto - -package socket - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type RemoteSocketServiceError_ErrorCode int32 - -const ( - RemoteSocketServiceError_SYSTEM_ERROR RemoteSocketServiceError_ErrorCode = 1 - RemoteSocketServiceError_GAI_ERROR RemoteSocketServiceError_ErrorCode = 2 - RemoteSocketServiceError_FAILURE RemoteSocketServiceError_ErrorCode = 4 - RemoteSocketServiceError_PERMISSION_DENIED RemoteSocketServiceError_ErrorCode = 5 - RemoteSocketServiceError_INVALID_REQUEST RemoteSocketServiceError_ErrorCode = 6 - RemoteSocketServiceError_SOCKET_CLOSED RemoteSocketServiceError_ErrorCode = 7 -) - -var RemoteSocketServiceError_ErrorCode_name = map[int32]string{ - 1: "SYSTEM_ERROR", - 2: "GAI_ERROR", - 4: "FAILURE", - 5: "PERMISSION_DENIED", - 6: "INVALID_REQUEST", - 7: "SOCKET_CLOSED", -} -var RemoteSocketServiceError_ErrorCode_value = map[string]int32{ - "SYSTEM_ERROR": 1, - "GAI_ERROR": 2, - "FAILURE": 4, - "PERMISSION_DENIED": 5, - "INVALID_REQUEST": 6, - "SOCKET_CLOSED": 7, -} - -func (x RemoteSocketServiceError_ErrorCode) Enum() *RemoteSocketServiceError_ErrorCode { - p := new(RemoteSocketServiceError_ErrorCode) - *p = x - return p -} -func (x RemoteSocketServiceError_ErrorCode) String() string { - return proto.EnumName(RemoteSocketServiceError_ErrorCode_name, int32(x)) -} -func (x *RemoteSocketServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_ErrorCode_value, data, "RemoteSocketServiceError_ErrorCode") - if err != nil { - return err - } - *x = RemoteSocketServiceError_ErrorCode(value) - return nil -} -func (RemoteSocketServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 0} -} - -type RemoteSocketServiceError_SystemError int32 - -const ( - RemoteSocketServiceError_SYS_SUCCESS RemoteSocketServiceError_SystemError = 0 - RemoteSocketServiceError_SYS_EPERM RemoteSocketServiceError_SystemError = 1 - RemoteSocketServiceError_SYS_ENOENT RemoteSocketServiceError_SystemError = 2 - RemoteSocketServiceError_SYS_ESRCH RemoteSocketServiceError_SystemError = 3 - RemoteSocketServiceError_SYS_EINTR RemoteSocketServiceError_SystemError = 4 - 
RemoteSocketServiceError_SYS_EIO RemoteSocketServiceError_SystemError = 5 - RemoteSocketServiceError_SYS_ENXIO RemoteSocketServiceError_SystemError = 6 - RemoteSocketServiceError_SYS_E2BIG RemoteSocketServiceError_SystemError = 7 - RemoteSocketServiceError_SYS_ENOEXEC RemoteSocketServiceError_SystemError = 8 - RemoteSocketServiceError_SYS_EBADF RemoteSocketServiceError_SystemError = 9 - RemoteSocketServiceError_SYS_ECHILD RemoteSocketServiceError_SystemError = 10 - RemoteSocketServiceError_SYS_EAGAIN RemoteSocketServiceError_SystemError = 11 - RemoteSocketServiceError_SYS_EWOULDBLOCK RemoteSocketServiceError_SystemError = 11 - RemoteSocketServiceError_SYS_ENOMEM RemoteSocketServiceError_SystemError = 12 - RemoteSocketServiceError_SYS_EACCES RemoteSocketServiceError_SystemError = 13 - RemoteSocketServiceError_SYS_EFAULT RemoteSocketServiceError_SystemError = 14 - RemoteSocketServiceError_SYS_ENOTBLK RemoteSocketServiceError_SystemError = 15 - RemoteSocketServiceError_SYS_EBUSY RemoteSocketServiceError_SystemError = 16 - RemoteSocketServiceError_SYS_EEXIST RemoteSocketServiceError_SystemError = 17 - RemoteSocketServiceError_SYS_EXDEV RemoteSocketServiceError_SystemError = 18 - RemoteSocketServiceError_SYS_ENODEV RemoteSocketServiceError_SystemError = 19 - RemoteSocketServiceError_SYS_ENOTDIR RemoteSocketServiceError_SystemError = 20 - RemoteSocketServiceError_SYS_EISDIR RemoteSocketServiceError_SystemError = 21 - RemoteSocketServiceError_SYS_EINVAL RemoteSocketServiceError_SystemError = 22 - RemoteSocketServiceError_SYS_ENFILE RemoteSocketServiceError_SystemError = 23 - RemoteSocketServiceError_SYS_EMFILE RemoteSocketServiceError_SystemError = 24 - RemoteSocketServiceError_SYS_ENOTTY RemoteSocketServiceError_SystemError = 25 - RemoteSocketServiceError_SYS_ETXTBSY RemoteSocketServiceError_SystemError = 26 - RemoteSocketServiceError_SYS_EFBIG RemoteSocketServiceError_SystemError = 27 - RemoteSocketServiceError_SYS_ENOSPC RemoteSocketServiceError_SystemError = 28 - RemoteSocketServiceError_SYS_ESPIPE RemoteSocketServiceError_SystemError = 29 - RemoteSocketServiceError_SYS_EROFS RemoteSocketServiceError_SystemError = 30 - RemoteSocketServiceError_SYS_EMLINK RemoteSocketServiceError_SystemError = 31 - RemoteSocketServiceError_SYS_EPIPE RemoteSocketServiceError_SystemError = 32 - RemoteSocketServiceError_SYS_EDOM RemoteSocketServiceError_SystemError = 33 - RemoteSocketServiceError_SYS_ERANGE RemoteSocketServiceError_SystemError = 34 - RemoteSocketServiceError_SYS_EDEADLK RemoteSocketServiceError_SystemError = 35 - RemoteSocketServiceError_SYS_EDEADLOCK RemoteSocketServiceError_SystemError = 35 - RemoteSocketServiceError_SYS_ENAMETOOLONG RemoteSocketServiceError_SystemError = 36 - RemoteSocketServiceError_SYS_ENOLCK RemoteSocketServiceError_SystemError = 37 - RemoteSocketServiceError_SYS_ENOSYS RemoteSocketServiceError_SystemError = 38 - RemoteSocketServiceError_SYS_ENOTEMPTY RemoteSocketServiceError_SystemError = 39 - RemoteSocketServiceError_SYS_ELOOP RemoteSocketServiceError_SystemError = 40 - RemoteSocketServiceError_SYS_ENOMSG RemoteSocketServiceError_SystemError = 42 - RemoteSocketServiceError_SYS_EIDRM RemoteSocketServiceError_SystemError = 43 - RemoteSocketServiceError_SYS_ECHRNG RemoteSocketServiceError_SystemError = 44 - RemoteSocketServiceError_SYS_EL2NSYNC RemoteSocketServiceError_SystemError = 45 - RemoteSocketServiceError_SYS_EL3HLT RemoteSocketServiceError_SystemError = 46 - RemoteSocketServiceError_SYS_EL3RST RemoteSocketServiceError_SystemError = 47 - 
RemoteSocketServiceError_SYS_ELNRNG RemoteSocketServiceError_SystemError = 48 - RemoteSocketServiceError_SYS_EUNATCH RemoteSocketServiceError_SystemError = 49 - RemoteSocketServiceError_SYS_ENOCSI RemoteSocketServiceError_SystemError = 50 - RemoteSocketServiceError_SYS_EL2HLT RemoteSocketServiceError_SystemError = 51 - RemoteSocketServiceError_SYS_EBADE RemoteSocketServiceError_SystemError = 52 - RemoteSocketServiceError_SYS_EBADR RemoteSocketServiceError_SystemError = 53 - RemoteSocketServiceError_SYS_EXFULL RemoteSocketServiceError_SystemError = 54 - RemoteSocketServiceError_SYS_ENOANO RemoteSocketServiceError_SystemError = 55 - RemoteSocketServiceError_SYS_EBADRQC RemoteSocketServiceError_SystemError = 56 - RemoteSocketServiceError_SYS_EBADSLT RemoteSocketServiceError_SystemError = 57 - RemoteSocketServiceError_SYS_EBFONT RemoteSocketServiceError_SystemError = 59 - RemoteSocketServiceError_SYS_ENOSTR RemoteSocketServiceError_SystemError = 60 - RemoteSocketServiceError_SYS_ENODATA RemoteSocketServiceError_SystemError = 61 - RemoteSocketServiceError_SYS_ETIME RemoteSocketServiceError_SystemError = 62 - RemoteSocketServiceError_SYS_ENOSR RemoteSocketServiceError_SystemError = 63 - RemoteSocketServiceError_SYS_ENONET RemoteSocketServiceError_SystemError = 64 - RemoteSocketServiceError_SYS_ENOPKG RemoteSocketServiceError_SystemError = 65 - RemoteSocketServiceError_SYS_EREMOTE RemoteSocketServiceError_SystemError = 66 - RemoteSocketServiceError_SYS_ENOLINK RemoteSocketServiceError_SystemError = 67 - RemoteSocketServiceError_SYS_EADV RemoteSocketServiceError_SystemError = 68 - RemoteSocketServiceError_SYS_ESRMNT RemoteSocketServiceError_SystemError = 69 - RemoteSocketServiceError_SYS_ECOMM RemoteSocketServiceError_SystemError = 70 - RemoteSocketServiceError_SYS_EPROTO RemoteSocketServiceError_SystemError = 71 - RemoteSocketServiceError_SYS_EMULTIHOP RemoteSocketServiceError_SystemError = 72 - RemoteSocketServiceError_SYS_EDOTDOT RemoteSocketServiceError_SystemError = 73 - RemoteSocketServiceError_SYS_EBADMSG RemoteSocketServiceError_SystemError = 74 - RemoteSocketServiceError_SYS_EOVERFLOW RemoteSocketServiceError_SystemError = 75 - RemoteSocketServiceError_SYS_ENOTUNIQ RemoteSocketServiceError_SystemError = 76 - RemoteSocketServiceError_SYS_EBADFD RemoteSocketServiceError_SystemError = 77 - RemoteSocketServiceError_SYS_EREMCHG RemoteSocketServiceError_SystemError = 78 - RemoteSocketServiceError_SYS_ELIBACC RemoteSocketServiceError_SystemError = 79 - RemoteSocketServiceError_SYS_ELIBBAD RemoteSocketServiceError_SystemError = 80 - RemoteSocketServiceError_SYS_ELIBSCN RemoteSocketServiceError_SystemError = 81 - RemoteSocketServiceError_SYS_ELIBMAX RemoteSocketServiceError_SystemError = 82 - RemoteSocketServiceError_SYS_ELIBEXEC RemoteSocketServiceError_SystemError = 83 - RemoteSocketServiceError_SYS_EILSEQ RemoteSocketServiceError_SystemError = 84 - RemoteSocketServiceError_SYS_ERESTART RemoteSocketServiceError_SystemError = 85 - RemoteSocketServiceError_SYS_ESTRPIPE RemoteSocketServiceError_SystemError = 86 - RemoteSocketServiceError_SYS_EUSERS RemoteSocketServiceError_SystemError = 87 - RemoteSocketServiceError_SYS_ENOTSOCK RemoteSocketServiceError_SystemError = 88 - RemoteSocketServiceError_SYS_EDESTADDRREQ RemoteSocketServiceError_SystemError = 89 - RemoteSocketServiceError_SYS_EMSGSIZE RemoteSocketServiceError_SystemError = 90 - RemoteSocketServiceError_SYS_EPROTOTYPE RemoteSocketServiceError_SystemError = 91 - RemoteSocketServiceError_SYS_ENOPROTOOPT RemoteSocketServiceError_SystemError = 
92 - RemoteSocketServiceError_SYS_EPROTONOSUPPORT RemoteSocketServiceError_SystemError = 93 - RemoteSocketServiceError_SYS_ESOCKTNOSUPPORT RemoteSocketServiceError_SystemError = 94 - RemoteSocketServiceError_SYS_EOPNOTSUPP RemoteSocketServiceError_SystemError = 95 - RemoteSocketServiceError_SYS_ENOTSUP RemoteSocketServiceError_SystemError = 95 - RemoteSocketServiceError_SYS_EPFNOSUPPORT RemoteSocketServiceError_SystemError = 96 - RemoteSocketServiceError_SYS_EAFNOSUPPORT RemoteSocketServiceError_SystemError = 97 - RemoteSocketServiceError_SYS_EADDRINUSE RemoteSocketServiceError_SystemError = 98 - RemoteSocketServiceError_SYS_EADDRNOTAVAIL RemoteSocketServiceError_SystemError = 99 - RemoteSocketServiceError_SYS_ENETDOWN RemoteSocketServiceError_SystemError = 100 - RemoteSocketServiceError_SYS_ENETUNREACH RemoteSocketServiceError_SystemError = 101 - RemoteSocketServiceError_SYS_ENETRESET RemoteSocketServiceError_SystemError = 102 - RemoteSocketServiceError_SYS_ECONNABORTED RemoteSocketServiceError_SystemError = 103 - RemoteSocketServiceError_SYS_ECONNRESET RemoteSocketServiceError_SystemError = 104 - RemoteSocketServiceError_SYS_ENOBUFS RemoteSocketServiceError_SystemError = 105 - RemoteSocketServiceError_SYS_EISCONN RemoteSocketServiceError_SystemError = 106 - RemoteSocketServiceError_SYS_ENOTCONN RemoteSocketServiceError_SystemError = 107 - RemoteSocketServiceError_SYS_ESHUTDOWN RemoteSocketServiceError_SystemError = 108 - RemoteSocketServiceError_SYS_ETOOMANYREFS RemoteSocketServiceError_SystemError = 109 - RemoteSocketServiceError_SYS_ETIMEDOUT RemoteSocketServiceError_SystemError = 110 - RemoteSocketServiceError_SYS_ECONNREFUSED RemoteSocketServiceError_SystemError = 111 - RemoteSocketServiceError_SYS_EHOSTDOWN RemoteSocketServiceError_SystemError = 112 - RemoteSocketServiceError_SYS_EHOSTUNREACH RemoteSocketServiceError_SystemError = 113 - RemoteSocketServiceError_SYS_EALREADY RemoteSocketServiceError_SystemError = 114 - RemoteSocketServiceError_SYS_EINPROGRESS RemoteSocketServiceError_SystemError = 115 - RemoteSocketServiceError_SYS_ESTALE RemoteSocketServiceError_SystemError = 116 - RemoteSocketServiceError_SYS_EUCLEAN RemoteSocketServiceError_SystemError = 117 - RemoteSocketServiceError_SYS_ENOTNAM RemoteSocketServiceError_SystemError = 118 - RemoteSocketServiceError_SYS_ENAVAIL RemoteSocketServiceError_SystemError = 119 - RemoteSocketServiceError_SYS_EISNAM RemoteSocketServiceError_SystemError = 120 - RemoteSocketServiceError_SYS_EREMOTEIO RemoteSocketServiceError_SystemError = 121 - RemoteSocketServiceError_SYS_EDQUOT RemoteSocketServiceError_SystemError = 122 - RemoteSocketServiceError_SYS_ENOMEDIUM RemoteSocketServiceError_SystemError = 123 - RemoteSocketServiceError_SYS_EMEDIUMTYPE RemoteSocketServiceError_SystemError = 124 - RemoteSocketServiceError_SYS_ECANCELED RemoteSocketServiceError_SystemError = 125 - RemoteSocketServiceError_SYS_ENOKEY RemoteSocketServiceError_SystemError = 126 - RemoteSocketServiceError_SYS_EKEYEXPIRED RemoteSocketServiceError_SystemError = 127 - RemoteSocketServiceError_SYS_EKEYREVOKED RemoteSocketServiceError_SystemError = 128 - RemoteSocketServiceError_SYS_EKEYREJECTED RemoteSocketServiceError_SystemError = 129 - RemoteSocketServiceError_SYS_EOWNERDEAD RemoteSocketServiceError_SystemError = 130 - RemoteSocketServiceError_SYS_ENOTRECOVERABLE RemoteSocketServiceError_SystemError = 131 - RemoteSocketServiceError_SYS_ERFKILL RemoteSocketServiceError_SystemError = 132 -) - -var RemoteSocketServiceError_SystemError_name = map[int32]string{ - 0: 
"SYS_SUCCESS", - 1: "SYS_EPERM", - 2: "SYS_ENOENT", - 3: "SYS_ESRCH", - 4: "SYS_EINTR", - 5: "SYS_EIO", - 6: "SYS_ENXIO", - 7: "SYS_E2BIG", - 8: "SYS_ENOEXEC", - 9: "SYS_EBADF", - 10: "SYS_ECHILD", - 11: "SYS_EAGAIN", - // Duplicate value: 11: "SYS_EWOULDBLOCK", - 12: "SYS_ENOMEM", - 13: "SYS_EACCES", - 14: "SYS_EFAULT", - 15: "SYS_ENOTBLK", - 16: "SYS_EBUSY", - 17: "SYS_EEXIST", - 18: "SYS_EXDEV", - 19: "SYS_ENODEV", - 20: "SYS_ENOTDIR", - 21: "SYS_EISDIR", - 22: "SYS_EINVAL", - 23: "SYS_ENFILE", - 24: "SYS_EMFILE", - 25: "SYS_ENOTTY", - 26: "SYS_ETXTBSY", - 27: "SYS_EFBIG", - 28: "SYS_ENOSPC", - 29: "SYS_ESPIPE", - 30: "SYS_EROFS", - 31: "SYS_EMLINK", - 32: "SYS_EPIPE", - 33: "SYS_EDOM", - 34: "SYS_ERANGE", - 35: "SYS_EDEADLK", - // Duplicate value: 35: "SYS_EDEADLOCK", - 36: "SYS_ENAMETOOLONG", - 37: "SYS_ENOLCK", - 38: "SYS_ENOSYS", - 39: "SYS_ENOTEMPTY", - 40: "SYS_ELOOP", - 42: "SYS_ENOMSG", - 43: "SYS_EIDRM", - 44: "SYS_ECHRNG", - 45: "SYS_EL2NSYNC", - 46: "SYS_EL3HLT", - 47: "SYS_EL3RST", - 48: "SYS_ELNRNG", - 49: "SYS_EUNATCH", - 50: "SYS_ENOCSI", - 51: "SYS_EL2HLT", - 52: "SYS_EBADE", - 53: "SYS_EBADR", - 54: "SYS_EXFULL", - 55: "SYS_ENOANO", - 56: "SYS_EBADRQC", - 57: "SYS_EBADSLT", - 59: "SYS_EBFONT", - 60: "SYS_ENOSTR", - 61: "SYS_ENODATA", - 62: "SYS_ETIME", - 63: "SYS_ENOSR", - 64: "SYS_ENONET", - 65: "SYS_ENOPKG", - 66: "SYS_EREMOTE", - 67: "SYS_ENOLINK", - 68: "SYS_EADV", - 69: "SYS_ESRMNT", - 70: "SYS_ECOMM", - 71: "SYS_EPROTO", - 72: "SYS_EMULTIHOP", - 73: "SYS_EDOTDOT", - 74: "SYS_EBADMSG", - 75: "SYS_EOVERFLOW", - 76: "SYS_ENOTUNIQ", - 77: "SYS_EBADFD", - 78: "SYS_EREMCHG", - 79: "SYS_ELIBACC", - 80: "SYS_ELIBBAD", - 81: "SYS_ELIBSCN", - 82: "SYS_ELIBMAX", - 83: "SYS_ELIBEXEC", - 84: "SYS_EILSEQ", - 85: "SYS_ERESTART", - 86: "SYS_ESTRPIPE", - 87: "SYS_EUSERS", - 88: "SYS_ENOTSOCK", - 89: "SYS_EDESTADDRREQ", - 90: "SYS_EMSGSIZE", - 91: "SYS_EPROTOTYPE", - 92: "SYS_ENOPROTOOPT", - 93: "SYS_EPROTONOSUPPORT", - 94: "SYS_ESOCKTNOSUPPORT", - 95: "SYS_EOPNOTSUPP", - // Duplicate value: 95: "SYS_ENOTSUP", - 96: "SYS_EPFNOSUPPORT", - 97: "SYS_EAFNOSUPPORT", - 98: "SYS_EADDRINUSE", - 99: "SYS_EADDRNOTAVAIL", - 100: "SYS_ENETDOWN", - 101: "SYS_ENETUNREACH", - 102: "SYS_ENETRESET", - 103: "SYS_ECONNABORTED", - 104: "SYS_ECONNRESET", - 105: "SYS_ENOBUFS", - 106: "SYS_EISCONN", - 107: "SYS_ENOTCONN", - 108: "SYS_ESHUTDOWN", - 109: "SYS_ETOOMANYREFS", - 110: "SYS_ETIMEDOUT", - 111: "SYS_ECONNREFUSED", - 112: "SYS_EHOSTDOWN", - 113: "SYS_EHOSTUNREACH", - 114: "SYS_EALREADY", - 115: "SYS_EINPROGRESS", - 116: "SYS_ESTALE", - 117: "SYS_EUCLEAN", - 118: "SYS_ENOTNAM", - 119: "SYS_ENAVAIL", - 120: "SYS_EISNAM", - 121: "SYS_EREMOTEIO", - 122: "SYS_EDQUOT", - 123: "SYS_ENOMEDIUM", - 124: "SYS_EMEDIUMTYPE", - 125: "SYS_ECANCELED", - 126: "SYS_ENOKEY", - 127: "SYS_EKEYEXPIRED", - 128: "SYS_EKEYREVOKED", - 129: "SYS_EKEYREJECTED", - 130: "SYS_EOWNERDEAD", - 131: "SYS_ENOTRECOVERABLE", - 132: "SYS_ERFKILL", -} -var RemoteSocketServiceError_SystemError_value = map[string]int32{ - "SYS_SUCCESS": 0, - "SYS_EPERM": 1, - "SYS_ENOENT": 2, - "SYS_ESRCH": 3, - "SYS_EINTR": 4, - "SYS_EIO": 5, - "SYS_ENXIO": 6, - "SYS_E2BIG": 7, - "SYS_ENOEXEC": 8, - "SYS_EBADF": 9, - "SYS_ECHILD": 10, - "SYS_EAGAIN": 11, - "SYS_EWOULDBLOCK": 11, - "SYS_ENOMEM": 12, - "SYS_EACCES": 13, - "SYS_EFAULT": 14, - "SYS_ENOTBLK": 15, - "SYS_EBUSY": 16, - "SYS_EEXIST": 17, - "SYS_EXDEV": 18, - "SYS_ENODEV": 19, - "SYS_ENOTDIR": 20, - "SYS_EISDIR": 21, - "SYS_EINVAL": 22, - "SYS_ENFILE": 23, - "SYS_EMFILE": 24, - "SYS_ENOTTY": 25, 
- "SYS_ETXTBSY": 26, - "SYS_EFBIG": 27, - "SYS_ENOSPC": 28, - "SYS_ESPIPE": 29, - "SYS_EROFS": 30, - "SYS_EMLINK": 31, - "SYS_EPIPE": 32, - "SYS_EDOM": 33, - "SYS_ERANGE": 34, - "SYS_EDEADLK": 35, - "SYS_EDEADLOCK": 35, - "SYS_ENAMETOOLONG": 36, - "SYS_ENOLCK": 37, - "SYS_ENOSYS": 38, - "SYS_ENOTEMPTY": 39, - "SYS_ELOOP": 40, - "SYS_ENOMSG": 42, - "SYS_EIDRM": 43, - "SYS_ECHRNG": 44, - "SYS_EL2NSYNC": 45, - "SYS_EL3HLT": 46, - "SYS_EL3RST": 47, - "SYS_ELNRNG": 48, - "SYS_EUNATCH": 49, - "SYS_ENOCSI": 50, - "SYS_EL2HLT": 51, - "SYS_EBADE": 52, - "SYS_EBADR": 53, - "SYS_EXFULL": 54, - "SYS_ENOANO": 55, - "SYS_EBADRQC": 56, - "SYS_EBADSLT": 57, - "SYS_EBFONT": 59, - "SYS_ENOSTR": 60, - "SYS_ENODATA": 61, - "SYS_ETIME": 62, - "SYS_ENOSR": 63, - "SYS_ENONET": 64, - "SYS_ENOPKG": 65, - "SYS_EREMOTE": 66, - "SYS_ENOLINK": 67, - "SYS_EADV": 68, - "SYS_ESRMNT": 69, - "SYS_ECOMM": 70, - "SYS_EPROTO": 71, - "SYS_EMULTIHOP": 72, - "SYS_EDOTDOT": 73, - "SYS_EBADMSG": 74, - "SYS_EOVERFLOW": 75, - "SYS_ENOTUNIQ": 76, - "SYS_EBADFD": 77, - "SYS_EREMCHG": 78, - "SYS_ELIBACC": 79, - "SYS_ELIBBAD": 80, - "SYS_ELIBSCN": 81, - "SYS_ELIBMAX": 82, - "SYS_ELIBEXEC": 83, - "SYS_EILSEQ": 84, - "SYS_ERESTART": 85, - "SYS_ESTRPIPE": 86, - "SYS_EUSERS": 87, - "SYS_ENOTSOCK": 88, - "SYS_EDESTADDRREQ": 89, - "SYS_EMSGSIZE": 90, - "SYS_EPROTOTYPE": 91, - "SYS_ENOPROTOOPT": 92, - "SYS_EPROTONOSUPPORT": 93, - "SYS_ESOCKTNOSUPPORT": 94, - "SYS_EOPNOTSUPP": 95, - "SYS_ENOTSUP": 95, - "SYS_EPFNOSUPPORT": 96, - "SYS_EAFNOSUPPORT": 97, - "SYS_EADDRINUSE": 98, - "SYS_EADDRNOTAVAIL": 99, - "SYS_ENETDOWN": 100, - "SYS_ENETUNREACH": 101, - "SYS_ENETRESET": 102, - "SYS_ECONNABORTED": 103, - "SYS_ECONNRESET": 104, - "SYS_ENOBUFS": 105, - "SYS_EISCONN": 106, - "SYS_ENOTCONN": 107, - "SYS_ESHUTDOWN": 108, - "SYS_ETOOMANYREFS": 109, - "SYS_ETIMEDOUT": 110, - "SYS_ECONNREFUSED": 111, - "SYS_EHOSTDOWN": 112, - "SYS_EHOSTUNREACH": 113, - "SYS_EALREADY": 114, - "SYS_EINPROGRESS": 115, - "SYS_ESTALE": 116, - "SYS_EUCLEAN": 117, - "SYS_ENOTNAM": 118, - "SYS_ENAVAIL": 119, - "SYS_EISNAM": 120, - "SYS_EREMOTEIO": 121, - "SYS_EDQUOT": 122, - "SYS_ENOMEDIUM": 123, - "SYS_EMEDIUMTYPE": 124, - "SYS_ECANCELED": 125, - "SYS_ENOKEY": 126, - "SYS_EKEYEXPIRED": 127, - "SYS_EKEYREVOKED": 128, - "SYS_EKEYREJECTED": 129, - "SYS_EOWNERDEAD": 130, - "SYS_ENOTRECOVERABLE": 131, - "SYS_ERFKILL": 132, -} - -func (x RemoteSocketServiceError_SystemError) Enum() *RemoteSocketServiceError_SystemError { - p := new(RemoteSocketServiceError_SystemError) - *p = x - return p -} -func (x RemoteSocketServiceError_SystemError) String() string { - return proto.EnumName(RemoteSocketServiceError_SystemError_name, int32(x)) -} -func (x *RemoteSocketServiceError_SystemError) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RemoteSocketServiceError_SystemError_value, data, "RemoteSocketServiceError_SystemError") - if err != nil { - return err - } - *x = RemoteSocketServiceError_SystemError(value) - return nil -} -func (RemoteSocketServiceError_SystemError) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{0, 1} -} - -type CreateSocketRequest_SocketFamily int32 - -const ( - CreateSocketRequest_IPv4 CreateSocketRequest_SocketFamily = 1 - CreateSocketRequest_IPv6 CreateSocketRequest_SocketFamily = 2 -) - -var CreateSocketRequest_SocketFamily_name = map[int32]string{ - 1: "IPv4", - 2: "IPv6", -} -var CreateSocketRequest_SocketFamily_value = map[string]int32{ - "IPv4": 1, - "IPv6": 2, -} - -func (x 
CreateSocketRequest_SocketFamily) Enum() *CreateSocketRequest_SocketFamily { - p := new(CreateSocketRequest_SocketFamily) - *p = x - return p -} -func (x CreateSocketRequest_SocketFamily) String() string { - return proto.EnumName(CreateSocketRequest_SocketFamily_name, int32(x)) -} -func (x *CreateSocketRequest_SocketFamily) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketFamily_value, data, "CreateSocketRequest_SocketFamily") - if err != nil { - return err - } - *x = CreateSocketRequest_SocketFamily(value) - return nil -} -func (CreateSocketRequest_SocketFamily) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 0} -} - -type CreateSocketRequest_SocketProtocol int32 - -const ( - CreateSocketRequest_TCP CreateSocketRequest_SocketProtocol = 1 - CreateSocketRequest_UDP CreateSocketRequest_SocketProtocol = 2 -) - -var CreateSocketRequest_SocketProtocol_name = map[int32]string{ - 1: "TCP", - 2: "UDP", -} -var CreateSocketRequest_SocketProtocol_value = map[string]int32{ - "TCP": 1, - "UDP": 2, -} - -func (x CreateSocketRequest_SocketProtocol) Enum() *CreateSocketRequest_SocketProtocol { - p := new(CreateSocketRequest_SocketProtocol) - *p = x - return p -} -func (x CreateSocketRequest_SocketProtocol) String() string { - return proto.EnumName(CreateSocketRequest_SocketProtocol_name, int32(x)) -} -func (x *CreateSocketRequest_SocketProtocol) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CreateSocketRequest_SocketProtocol_value, data, "CreateSocketRequest_SocketProtocol") - if err != nil { - return err - } - *x = CreateSocketRequest_SocketProtocol(value) - return nil -} -func (CreateSocketRequest_SocketProtocol) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{2, 1} -} - -type SocketOption_SocketOptionLevel int32 - -const ( - SocketOption_SOCKET_SOL_IP SocketOption_SocketOptionLevel = 0 - SocketOption_SOCKET_SOL_SOCKET SocketOption_SocketOptionLevel = 1 - SocketOption_SOCKET_SOL_TCP SocketOption_SocketOptionLevel = 6 - SocketOption_SOCKET_SOL_UDP SocketOption_SocketOptionLevel = 17 -) - -var SocketOption_SocketOptionLevel_name = map[int32]string{ - 0: "SOCKET_SOL_IP", - 1: "SOCKET_SOL_SOCKET", - 6: "SOCKET_SOL_TCP", - 17: "SOCKET_SOL_UDP", -} -var SocketOption_SocketOptionLevel_value = map[string]int32{ - "SOCKET_SOL_IP": 0, - "SOCKET_SOL_SOCKET": 1, - "SOCKET_SOL_TCP": 6, - "SOCKET_SOL_UDP": 17, -} - -func (x SocketOption_SocketOptionLevel) Enum() *SocketOption_SocketOptionLevel { - p := new(SocketOption_SocketOptionLevel) - *p = x - return p -} -func (x SocketOption_SocketOptionLevel) String() string { - return proto.EnumName(SocketOption_SocketOptionLevel_name, int32(x)) -} -func (x *SocketOption_SocketOptionLevel) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionLevel_value, data, "SocketOption_SocketOptionLevel") - if err != nil { - return err - } - *x = SocketOption_SocketOptionLevel(value) - return nil -} -func (SocketOption_SocketOptionLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 0} -} - -type SocketOption_SocketOptionName int32 - -const ( - SocketOption_SOCKET_SO_DEBUG SocketOption_SocketOptionName = 1 - SocketOption_SOCKET_SO_REUSEADDR SocketOption_SocketOptionName = 2 - SocketOption_SOCKET_SO_TYPE SocketOption_SocketOptionName = 3 - SocketOption_SOCKET_SO_ERROR SocketOption_SocketOptionName = 4 - 
SocketOption_SOCKET_SO_DONTROUTE SocketOption_SocketOptionName = 5 - SocketOption_SOCKET_SO_BROADCAST SocketOption_SocketOptionName = 6 - SocketOption_SOCKET_SO_SNDBUF SocketOption_SocketOptionName = 7 - SocketOption_SOCKET_SO_RCVBUF SocketOption_SocketOptionName = 8 - SocketOption_SOCKET_SO_KEEPALIVE SocketOption_SocketOptionName = 9 - SocketOption_SOCKET_SO_OOBINLINE SocketOption_SocketOptionName = 10 - SocketOption_SOCKET_SO_LINGER SocketOption_SocketOptionName = 13 - SocketOption_SOCKET_SO_RCVTIMEO SocketOption_SocketOptionName = 20 - SocketOption_SOCKET_SO_SNDTIMEO SocketOption_SocketOptionName = 21 - SocketOption_SOCKET_IP_TOS SocketOption_SocketOptionName = 1 - SocketOption_SOCKET_IP_TTL SocketOption_SocketOptionName = 2 - SocketOption_SOCKET_IP_HDRINCL SocketOption_SocketOptionName = 3 - SocketOption_SOCKET_IP_OPTIONS SocketOption_SocketOptionName = 4 - SocketOption_SOCKET_TCP_NODELAY SocketOption_SocketOptionName = 1 - SocketOption_SOCKET_TCP_MAXSEG SocketOption_SocketOptionName = 2 - SocketOption_SOCKET_TCP_CORK SocketOption_SocketOptionName = 3 - SocketOption_SOCKET_TCP_KEEPIDLE SocketOption_SocketOptionName = 4 - SocketOption_SOCKET_TCP_KEEPINTVL SocketOption_SocketOptionName = 5 - SocketOption_SOCKET_TCP_KEEPCNT SocketOption_SocketOptionName = 6 - SocketOption_SOCKET_TCP_SYNCNT SocketOption_SocketOptionName = 7 - SocketOption_SOCKET_TCP_LINGER2 SocketOption_SocketOptionName = 8 - SocketOption_SOCKET_TCP_DEFER_ACCEPT SocketOption_SocketOptionName = 9 - SocketOption_SOCKET_TCP_WINDOW_CLAMP SocketOption_SocketOptionName = 10 - SocketOption_SOCKET_TCP_INFO SocketOption_SocketOptionName = 11 - SocketOption_SOCKET_TCP_QUICKACK SocketOption_SocketOptionName = 12 -) - -var SocketOption_SocketOptionName_name = map[int32]string{ - 1: "SOCKET_SO_DEBUG", - 2: "SOCKET_SO_REUSEADDR", - 3: "SOCKET_SO_TYPE", - 4: "SOCKET_SO_ERROR", - 5: "SOCKET_SO_DONTROUTE", - 6: "SOCKET_SO_BROADCAST", - 7: "SOCKET_SO_SNDBUF", - 8: "SOCKET_SO_RCVBUF", - 9: "SOCKET_SO_KEEPALIVE", - 10: "SOCKET_SO_OOBINLINE", - 13: "SOCKET_SO_LINGER", - 20: "SOCKET_SO_RCVTIMEO", - 21: "SOCKET_SO_SNDTIMEO", - // Duplicate value: 1: "SOCKET_IP_TOS", - // Duplicate value: 2: "SOCKET_IP_TTL", - // Duplicate value: 3: "SOCKET_IP_HDRINCL", - // Duplicate value: 4: "SOCKET_IP_OPTIONS", - // Duplicate value: 1: "SOCKET_TCP_NODELAY", - // Duplicate value: 2: "SOCKET_TCP_MAXSEG", - // Duplicate value: 3: "SOCKET_TCP_CORK", - // Duplicate value: 4: "SOCKET_TCP_KEEPIDLE", - // Duplicate value: 5: "SOCKET_TCP_KEEPINTVL", - // Duplicate value: 6: "SOCKET_TCP_KEEPCNT", - // Duplicate value: 7: "SOCKET_TCP_SYNCNT", - // Duplicate value: 8: "SOCKET_TCP_LINGER2", - // Duplicate value: 9: "SOCKET_TCP_DEFER_ACCEPT", - // Duplicate value: 10: "SOCKET_TCP_WINDOW_CLAMP", - 11: "SOCKET_TCP_INFO", - 12: "SOCKET_TCP_QUICKACK", -} -var SocketOption_SocketOptionName_value = map[string]int32{ - "SOCKET_SO_DEBUG": 1, - "SOCKET_SO_REUSEADDR": 2, - "SOCKET_SO_TYPE": 3, - "SOCKET_SO_ERROR": 4, - "SOCKET_SO_DONTROUTE": 5, - "SOCKET_SO_BROADCAST": 6, - "SOCKET_SO_SNDBUF": 7, - "SOCKET_SO_RCVBUF": 8, - "SOCKET_SO_KEEPALIVE": 9, - "SOCKET_SO_OOBINLINE": 10, - "SOCKET_SO_LINGER": 13, - "SOCKET_SO_RCVTIMEO": 20, - "SOCKET_SO_SNDTIMEO": 21, - "SOCKET_IP_TOS": 1, - "SOCKET_IP_TTL": 2, - "SOCKET_IP_HDRINCL": 3, - "SOCKET_IP_OPTIONS": 4, - "SOCKET_TCP_NODELAY": 1, - "SOCKET_TCP_MAXSEG": 2, - "SOCKET_TCP_CORK": 3, - "SOCKET_TCP_KEEPIDLE": 4, - "SOCKET_TCP_KEEPINTVL": 5, - "SOCKET_TCP_KEEPCNT": 6, - "SOCKET_TCP_SYNCNT": 7, - "SOCKET_TCP_LINGER2": 8, - 
"SOCKET_TCP_DEFER_ACCEPT": 9, - "SOCKET_TCP_WINDOW_CLAMP": 10, - "SOCKET_TCP_INFO": 11, - "SOCKET_TCP_QUICKACK": 12, -} - -func (x SocketOption_SocketOptionName) Enum() *SocketOption_SocketOptionName { - p := new(SocketOption_SocketOptionName) - *p = x - return p -} -func (x SocketOption_SocketOptionName) String() string { - return proto.EnumName(SocketOption_SocketOptionName_name, int32(x)) -} -func (x *SocketOption_SocketOptionName) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(SocketOption_SocketOptionName_value, data, "SocketOption_SocketOptionName") - if err != nil { - return err - } - *x = SocketOption_SocketOptionName(value) - return nil -} -func (SocketOption_SocketOptionName) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{10, 1} -} - -type ShutDownRequest_How int32 - -const ( - ShutDownRequest_SOCKET_SHUT_RD ShutDownRequest_How = 1 - ShutDownRequest_SOCKET_SHUT_WR ShutDownRequest_How = 2 - ShutDownRequest_SOCKET_SHUT_RDWR ShutDownRequest_How = 3 -) - -var ShutDownRequest_How_name = map[int32]string{ - 1: "SOCKET_SHUT_RD", - 2: "SOCKET_SHUT_WR", - 3: "SOCKET_SHUT_RDWR", -} -var ShutDownRequest_How_value = map[string]int32{ - "SOCKET_SHUT_RD": 1, - "SOCKET_SHUT_WR": 2, - "SOCKET_SHUT_RDWR": 3, -} - -func (x ShutDownRequest_How) Enum() *ShutDownRequest_How { - p := new(ShutDownRequest_How) - *p = x - return p -} -func (x ShutDownRequest_How) String() string { - return proto.EnumName(ShutDownRequest_How_name, int32(x)) -} -func (x *ShutDownRequest_How) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ShutDownRequest_How_value, data, "ShutDownRequest_How") - if err != nil { - return err - } - *x = ShutDownRequest_How(value) - return nil -} -func (ShutDownRequest_How) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{21, 0} -} - -type ReceiveRequest_Flags int32 - -const ( - ReceiveRequest_MSG_OOB ReceiveRequest_Flags = 1 - ReceiveRequest_MSG_PEEK ReceiveRequest_Flags = 2 -) - -var ReceiveRequest_Flags_name = map[int32]string{ - 1: "MSG_OOB", - 2: "MSG_PEEK", -} -var ReceiveRequest_Flags_value = map[string]int32{ - "MSG_OOB": 1, - "MSG_PEEK": 2, -} - -func (x ReceiveRequest_Flags) Enum() *ReceiveRequest_Flags { - p := new(ReceiveRequest_Flags) - *p = x - return p -} -func (x ReceiveRequest_Flags) String() string { - return proto.EnumName(ReceiveRequest_Flags_name, int32(x)) -} -func (x *ReceiveRequest_Flags) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ReceiveRequest_Flags_value, data, "ReceiveRequest_Flags") - if err != nil { - return err - } - *x = ReceiveRequest_Flags(value) - return nil -} -func (ReceiveRequest_Flags) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{27, 0} -} - -type PollEvent_PollEventFlag int32 - -const ( - PollEvent_SOCKET_POLLNONE PollEvent_PollEventFlag = 0 - PollEvent_SOCKET_POLLIN PollEvent_PollEventFlag = 1 - PollEvent_SOCKET_POLLPRI PollEvent_PollEventFlag = 2 - PollEvent_SOCKET_POLLOUT PollEvent_PollEventFlag = 4 - PollEvent_SOCKET_POLLERR PollEvent_PollEventFlag = 8 - PollEvent_SOCKET_POLLHUP PollEvent_PollEventFlag = 16 - PollEvent_SOCKET_POLLNVAL PollEvent_PollEventFlag = 32 - PollEvent_SOCKET_POLLRDNORM PollEvent_PollEventFlag = 64 - PollEvent_SOCKET_POLLRDBAND PollEvent_PollEventFlag = 128 - PollEvent_SOCKET_POLLWRNORM PollEvent_PollEventFlag = 256 - PollEvent_SOCKET_POLLWRBAND PollEvent_PollEventFlag = 512 - 
PollEvent_SOCKET_POLLMSG PollEvent_PollEventFlag = 1024 - PollEvent_SOCKET_POLLREMOVE PollEvent_PollEventFlag = 4096 - PollEvent_SOCKET_POLLRDHUP PollEvent_PollEventFlag = 8192 -) - -var PollEvent_PollEventFlag_name = map[int32]string{ - 0: "SOCKET_POLLNONE", - 1: "SOCKET_POLLIN", - 2: "SOCKET_POLLPRI", - 4: "SOCKET_POLLOUT", - 8: "SOCKET_POLLERR", - 16: "SOCKET_POLLHUP", - 32: "SOCKET_POLLNVAL", - 64: "SOCKET_POLLRDNORM", - 128: "SOCKET_POLLRDBAND", - 256: "SOCKET_POLLWRNORM", - 512: "SOCKET_POLLWRBAND", - 1024: "SOCKET_POLLMSG", - 4096: "SOCKET_POLLREMOVE", - 8192: "SOCKET_POLLRDHUP", -} -var PollEvent_PollEventFlag_value = map[string]int32{ - "SOCKET_POLLNONE": 0, - "SOCKET_POLLIN": 1, - "SOCKET_POLLPRI": 2, - "SOCKET_POLLOUT": 4, - "SOCKET_POLLERR": 8, - "SOCKET_POLLHUP": 16, - "SOCKET_POLLNVAL": 32, - "SOCKET_POLLRDNORM": 64, - "SOCKET_POLLRDBAND": 128, - "SOCKET_POLLWRNORM": 256, - "SOCKET_POLLWRBAND": 512, - "SOCKET_POLLMSG": 1024, - "SOCKET_POLLREMOVE": 4096, - "SOCKET_POLLRDHUP": 8192, -} - -func (x PollEvent_PollEventFlag) Enum() *PollEvent_PollEventFlag { - p := new(PollEvent_PollEventFlag) - *p = x - return p -} -func (x PollEvent_PollEventFlag) String() string { - return proto.EnumName(PollEvent_PollEventFlag_name, int32(x)) -} -func (x *PollEvent_PollEventFlag) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(PollEvent_PollEventFlag_value, data, "PollEvent_PollEventFlag") - if err != nil { - return err - } - *x = PollEvent_PollEventFlag(value) - return nil -} -func (PollEvent_PollEventFlag) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{29, 0} -} - -type ResolveReply_ErrorCode int32 - -const ( - ResolveReply_SOCKET_EAI_ADDRFAMILY ResolveReply_ErrorCode = 1 - ResolveReply_SOCKET_EAI_AGAIN ResolveReply_ErrorCode = 2 - ResolveReply_SOCKET_EAI_BADFLAGS ResolveReply_ErrorCode = 3 - ResolveReply_SOCKET_EAI_FAIL ResolveReply_ErrorCode = 4 - ResolveReply_SOCKET_EAI_FAMILY ResolveReply_ErrorCode = 5 - ResolveReply_SOCKET_EAI_MEMORY ResolveReply_ErrorCode = 6 - ResolveReply_SOCKET_EAI_NODATA ResolveReply_ErrorCode = 7 - ResolveReply_SOCKET_EAI_NONAME ResolveReply_ErrorCode = 8 - ResolveReply_SOCKET_EAI_SERVICE ResolveReply_ErrorCode = 9 - ResolveReply_SOCKET_EAI_SOCKTYPE ResolveReply_ErrorCode = 10 - ResolveReply_SOCKET_EAI_SYSTEM ResolveReply_ErrorCode = 11 - ResolveReply_SOCKET_EAI_BADHINTS ResolveReply_ErrorCode = 12 - ResolveReply_SOCKET_EAI_PROTOCOL ResolveReply_ErrorCode = 13 - ResolveReply_SOCKET_EAI_OVERFLOW ResolveReply_ErrorCode = 14 - ResolveReply_SOCKET_EAI_MAX ResolveReply_ErrorCode = 15 -) - -var ResolveReply_ErrorCode_name = map[int32]string{ - 1: "SOCKET_EAI_ADDRFAMILY", - 2: "SOCKET_EAI_AGAIN", - 3: "SOCKET_EAI_BADFLAGS", - 4: "SOCKET_EAI_FAIL", - 5: "SOCKET_EAI_FAMILY", - 6: "SOCKET_EAI_MEMORY", - 7: "SOCKET_EAI_NODATA", - 8: "SOCKET_EAI_NONAME", - 9: "SOCKET_EAI_SERVICE", - 10: "SOCKET_EAI_SOCKTYPE", - 11: "SOCKET_EAI_SYSTEM", - 12: "SOCKET_EAI_BADHINTS", - 13: "SOCKET_EAI_PROTOCOL", - 14: "SOCKET_EAI_OVERFLOW", - 15: "SOCKET_EAI_MAX", -} -var ResolveReply_ErrorCode_value = map[string]int32{ - "SOCKET_EAI_ADDRFAMILY": 1, - "SOCKET_EAI_AGAIN": 2, - "SOCKET_EAI_BADFLAGS": 3, - "SOCKET_EAI_FAIL": 4, - "SOCKET_EAI_FAMILY": 5, - "SOCKET_EAI_MEMORY": 6, - "SOCKET_EAI_NODATA": 7, - "SOCKET_EAI_NONAME": 8, - "SOCKET_EAI_SERVICE": 9, - "SOCKET_EAI_SOCKTYPE": 10, - "SOCKET_EAI_SYSTEM": 11, - "SOCKET_EAI_BADHINTS": 12, - "SOCKET_EAI_PROTOCOL": 13, - "SOCKET_EAI_OVERFLOW": 14, - "SOCKET_EAI_MAX": 15, -} 
- -func (x ResolveReply_ErrorCode) Enum() *ResolveReply_ErrorCode { - p := new(ResolveReply_ErrorCode) - *p = x - return p -} -func (x ResolveReply_ErrorCode) String() string { - return proto.EnumName(ResolveReply_ErrorCode_name, int32(x)) -} -func (x *ResolveReply_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ResolveReply_ErrorCode_value, data, "ResolveReply_ErrorCode") - if err != nil { - return err - } - *x = ResolveReply_ErrorCode(value) - return nil -} -func (ResolveReply_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{33, 0} -} - -type RemoteSocketServiceError struct { - SystemError *int32 `protobuf:"varint,1,opt,name=system_error,json=systemError,def=0" json:"system_error,omitempty"` - ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail,json=errorDetail" json:"error_detail,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RemoteSocketServiceError) Reset() { *m = RemoteSocketServiceError{} } -func (m *RemoteSocketServiceError) String() string { return proto.CompactTextString(m) } -func (*RemoteSocketServiceError) ProtoMessage() {} -func (*RemoteSocketServiceError) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{0} -} -func (m *RemoteSocketServiceError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RemoteSocketServiceError.Unmarshal(m, b) -} -func (m *RemoteSocketServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RemoteSocketServiceError.Marshal(b, m, deterministic) -} -func (dst *RemoteSocketServiceError) XXX_Merge(src proto.Message) { - xxx_messageInfo_RemoteSocketServiceError.Merge(dst, src) -} -func (m *RemoteSocketServiceError) XXX_Size() int { - return xxx_messageInfo_RemoteSocketServiceError.Size(m) -} -func (m *RemoteSocketServiceError) XXX_DiscardUnknown() { - xxx_messageInfo_RemoteSocketServiceError.DiscardUnknown(m) -} - -var xxx_messageInfo_RemoteSocketServiceError proto.InternalMessageInfo - -const Default_RemoteSocketServiceError_SystemError int32 = 0 - -func (m *RemoteSocketServiceError) GetSystemError() int32 { - if m != nil && m.SystemError != nil { - return *m.SystemError - } - return Default_RemoteSocketServiceError_SystemError -} - -func (m *RemoteSocketServiceError) GetErrorDetail() string { - if m != nil && m.ErrorDetail != nil { - return *m.ErrorDetail - } - return "" -} - -type AddressPort struct { - Port *int32 `protobuf:"varint,1,req,name=port" json:"port,omitempty"` - PackedAddress []byte `protobuf:"bytes,2,opt,name=packed_address,json=packedAddress" json:"packed_address,omitempty"` - HostnameHint *string `protobuf:"bytes,3,opt,name=hostname_hint,json=hostnameHint" json:"hostname_hint,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AddressPort) Reset() { *m = AddressPort{} } -func (m *AddressPort) String() string { return proto.CompactTextString(m) } -func (*AddressPort) ProtoMessage() {} -func (*AddressPort) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{1} -} -func (m *AddressPort) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AddressPort.Unmarshal(m, b) -} -func (m *AddressPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AddressPort.Marshal(b, m, deterministic) -} 
-func (dst *AddressPort) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddressPort.Merge(dst, src) -} -func (m *AddressPort) XXX_Size() int { - return xxx_messageInfo_AddressPort.Size(m) -} -func (m *AddressPort) XXX_DiscardUnknown() { - xxx_messageInfo_AddressPort.DiscardUnknown(m) -} - -var xxx_messageInfo_AddressPort proto.InternalMessageInfo - -func (m *AddressPort) GetPort() int32 { - if m != nil && m.Port != nil { - return *m.Port - } - return 0 -} - -func (m *AddressPort) GetPackedAddress() []byte { - if m != nil { - return m.PackedAddress - } - return nil -} - -func (m *AddressPort) GetHostnameHint() string { - if m != nil && m.HostnameHint != nil { - return *m.HostnameHint - } - return "" -} - -type CreateSocketRequest struct { - Family *CreateSocketRequest_SocketFamily `protobuf:"varint,1,req,name=family,enum=appengine.CreateSocketRequest_SocketFamily" json:"family,omitempty"` - Protocol *CreateSocketRequest_SocketProtocol `protobuf:"varint,2,req,name=protocol,enum=appengine.CreateSocketRequest_SocketProtocol" json:"protocol,omitempty"` - SocketOptions []*SocketOption `protobuf:"bytes,3,rep,name=socket_options,json=socketOptions" json:"socket_options,omitempty"` - ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - ListenBacklog *int32 `protobuf:"varint,5,opt,name=listen_backlog,json=listenBacklog,def=0" json:"listen_backlog,omitempty"` - RemoteIp *AddressPort `protobuf:"bytes,6,opt,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` - AppId *string `protobuf:"bytes,9,opt,name=app_id,json=appId" json:"app_id,omitempty"` - ProjectId *int64 `protobuf:"varint,10,opt,name=project_id,json=projectId" json:"project_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateSocketRequest) Reset() { *m = CreateSocketRequest{} } -func (m *CreateSocketRequest) String() string { return proto.CompactTextString(m) } -func (*CreateSocketRequest) ProtoMessage() {} -func (*CreateSocketRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{2} -} -func (m *CreateSocketRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateSocketRequest.Unmarshal(m, b) -} -func (m *CreateSocketRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateSocketRequest.Marshal(b, m, deterministic) -} -func (dst *CreateSocketRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateSocketRequest.Merge(dst, src) -} -func (m *CreateSocketRequest) XXX_Size() int { - return xxx_messageInfo_CreateSocketRequest.Size(m) -} -func (m *CreateSocketRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateSocketRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateSocketRequest proto.InternalMessageInfo - -const Default_CreateSocketRequest_ListenBacklog int32 = 0 - -func (m *CreateSocketRequest) GetFamily() CreateSocketRequest_SocketFamily { - if m != nil && m.Family != nil { - return *m.Family - } - return CreateSocketRequest_IPv4 -} - -func (m *CreateSocketRequest) GetProtocol() CreateSocketRequest_SocketProtocol { - if m != nil && m.Protocol != nil { - return *m.Protocol - } - return CreateSocketRequest_TCP -} - -func (m *CreateSocketRequest) GetSocketOptions() []*SocketOption { - if m != nil { - return m.SocketOptions - } - return nil -} - -func (m *CreateSocketRequest) GetProxyExternalIp() *AddressPort { - if m != nil { - return 
m.ProxyExternalIp - } - return nil -} - -func (m *CreateSocketRequest) GetListenBacklog() int32 { - if m != nil && m.ListenBacklog != nil { - return *m.ListenBacklog - } - return Default_CreateSocketRequest_ListenBacklog -} - -func (m *CreateSocketRequest) GetRemoteIp() *AddressPort { - if m != nil { - return m.RemoteIp - } - return nil -} - -func (m *CreateSocketRequest) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *CreateSocketRequest) GetProjectId() int64 { - if m != nil && m.ProjectId != nil { - return *m.ProjectId - } - return 0 -} - -type CreateSocketReply struct { - SocketDescriptor *string `protobuf:"bytes,1,opt,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - ServerAddress *AddressPort `protobuf:"bytes,3,opt,name=server_address,json=serverAddress" json:"server_address,omitempty"` - ProxyExternalIp *AddressPort `protobuf:"bytes,4,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateSocketReply) Reset() { *m = CreateSocketReply{} } -func (m *CreateSocketReply) String() string { return proto.CompactTextString(m) } -func (*CreateSocketReply) ProtoMessage() {} -func (*CreateSocketReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{3} -} - -var extRange_CreateSocketReply = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*CreateSocketReply) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_CreateSocketReply -} -func (m *CreateSocketReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CreateSocketReply.Unmarshal(m, b) -} -func (m *CreateSocketReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CreateSocketReply.Marshal(b, m, deterministic) -} -func (dst *CreateSocketReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateSocketReply.Merge(dst, src) -} -func (m *CreateSocketReply) XXX_Size() int { - return xxx_messageInfo_CreateSocketReply.Size(m) -} -func (m *CreateSocketReply) XXX_DiscardUnknown() { - xxx_messageInfo_CreateSocketReply.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateSocketReply proto.InternalMessageInfo - -func (m *CreateSocketReply) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *CreateSocketReply) GetServerAddress() *AddressPort { - if m != nil { - return m.ServerAddress - } - return nil -} - -func (m *CreateSocketReply) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type BindRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - ProxyExternalIp *AddressPort `protobuf:"bytes,2,req,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BindRequest) Reset() { *m = BindRequest{} } -func (m *BindRequest) String() string { return proto.CompactTextString(m) } -func (*BindRequest) ProtoMessage() {} -func (*BindRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{4} -} -func (m *BindRequest) XXX_Unmarshal(b 
[]byte) error { - return xxx_messageInfo_BindRequest.Unmarshal(m, b) -} -func (m *BindRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BindRequest.Marshal(b, m, deterministic) -} -func (dst *BindRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_BindRequest.Merge(dst, src) -} -func (m *BindRequest) XXX_Size() int { - return xxx_messageInfo_BindRequest.Size(m) -} -func (m *BindRequest) XXX_DiscardUnknown() { - xxx_messageInfo_BindRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_BindRequest proto.InternalMessageInfo - -func (m *BindRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *BindRequest) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type BindReply struct { - ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BindReply) Reset() { *m = BindReply{} } -func (m *BindReply) String() string { return proto.CompactTextString(m) } -func (*BindReply) ProtoMessage() {} -func (*BindReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{5} -} -func (m *BindReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BindReply.Unmarshal(m, b) -} -func (m *BindReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BindReply.Marshal(b, m, deterministic) -} -func (dst *BindReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_BindReply.Merge(dst, src) -} -func (m *BindReply) XXX_Size() int { - return xxx_messageInfo_BindReply.Size(m) -} -func (m *BindReply) XXX_DiscardUnknown() { - xxx_messageInfo_BindReply.DiscardUnknown(m) -} - -var xxx_messageInfo_BindReply proto.InternalMessageInfo - -func (m *BindReply) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type GetSocketNameRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetSocketNameRequest) Reset() { *m = GetSocketNameRequest{} } -func (m *GetSocketNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetSocketNameRequest) ProtoMessage() {} -func (*GetSocketNameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{6} -} -func (m *GetSocketNameRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetSocketNameRequest.Unmarshal(m, b) -} -func (m *GetSocketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetSocketNameRequest.Marshal(b, m, deterministic) -} -func (dst *GetSocketNameRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSocketNameRequest.Merge(dst, src) -} -func (m *GetSocketNameRequest) XXX_Size() int { - return xxx_messageInfo_GetSocketNameRequest.Size(m) -} -func (m *GetSocketNameRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetSocketNameRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSocketNameRequest proto.InternalMessageInfo - -func (m *GetSocketNameRequest) GetSocketDescriptor() string { - if m != nil && 
m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -type GetSocketNameReply struct { - ProxyExternalIp *AddressPort `protobuf:"bytes,2,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetSocketNameReply) Reset() { *m = GetSocketNameReply{} } -func (m *GetSocketNameReply) String() string { return proto.CompactTextString(m) } -func (*GetSocketNameReply) ProtoMessage() {} -func (*GetSocketNameReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{7} -} -func (m *GetSocketNameReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetSocketNameReply.Unmarshal(m, b) -} -func (m *GetSocketNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetSocketNameReply.Marshal(b, m, deterministic) -} -func (dst *GetSocketNameReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSocketNameReply.Merge(dst, src) -} -func (m *GetSocketNameReply) XXX_Size() int { - return xxx_messageInfo_GetSocketNameReply.Size(m) -} -func (m *GetSocketNameReply) XXX_DiscardUnknown() { - xxx_messageInfo_GetSocketNameReply.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSocketNameReply proto.InternalMessageInfo - -func (m *GetSocketNameReply) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type GetPeerNameRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPeerNameRequest) Reset() { *m = GetPeerNameRequest{} } -func (m *GetPeerNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetPeerNameRequest) ProtoMessage() {} -func (*GetPeerNameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{8} -} -func (m *GetPeerNameRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPeerNameRequest.Unmarshal(m, b) -} -func (m *GetPeerNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPeerNameRequest.Marshal(b, m, deterministic) -} -func (dst *GetPeerNameRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPeerNameRequest.Merge(dst, src) -} -func (m *GetPeerNameRequest) XXX_Size() int { - return xxx_messageInfo_GetPeerNameRequest.Size(m) -} -func (m *GetPeerNameRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetPeerNameRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPeerNameRequest proto.InternalMessageInfo - -func (m *GetPeerNameRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -type GetPeerNameReply struct { - PeerIp *AddressPort `protobuf:"bytes,2,opt,name=peer_ip,json=peerIp" json:"peer_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPeerNameReply) Reset() { *m = GetPeerNameReply{} } -func (m *GetPeerNameReply) String() string { return proto.CompactTextString(m) } -func (*GetPeerNameReply) ProtoMessage() {} -func (*GetPeerNameReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{9} -} -func (m 
*GetPeerNameReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPeerNameReply.Unmarshal(m, b) -} -func (m *GetPeerNameReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPeerNameReply.Marshal(b, m, deterministic) -} -func (dst *GetPeerNameReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPeerNameReply.Merge(dst, src) -} -func (m *GetPeerNameReply) XXX_Size() int { - return xxx_messageInfo_GetPeerNameReply.Size(m) -} -func (m *GetPeerNameReply) XXX_DiscardUnknown() { - xxx_messageInfo_GetPeerNameReply.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPeerNameReply proto.InternalMessageInfo - -func (m *GetPeerNameReply) GetPeerIp() *AddressPort { - if m != nil { - return m.PeerIp - } - return nil -} - -type SocketOption struct { - Level *SocketOption_SocketOptionLevel `protobuf:"varint,1,req,name=level,enum=appengine.SocketOption_SocketOptionLevel" json:"level,omitempty"` - Option *SocketOption_SocketOptionName `protobuf:"varint,2,req,name=option,enum=appengine.SocketOption_SocketOptionName" json:"option,omitempty"` - Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SocketOption) Reset() { *m = SocketOption{} } -func (m *SocketOption) String() string { return proto.CompactTextString(m) } -func (*SocketOption) ProtoMessage() {} -func (*SocketOption) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{10} -} -func (m *SocketOption) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SocketOption.Unmarshal(m, b) -} -func (m *SocketOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SocketOption.Marshal(b, m, deterministic) -} -func (dst *SocketOption) XXX_Merge(src proto.Message) { - xxx_messageInfo_SocketOption.Merge(dst, src) -} -func (m *SocketOption) XXX_Size() int { - return xxx_messageInfo_SocketOption.Size(m) -} -func (m *SocketOption) XXX_DiscardUnknown() { - xxx_messageInfo_SocketOption.DiscardUnknown(m) -} - -var xxx_messageInfo_SocketOption proto.InternalMessageInfo - -func (m *SocketOption) GetLevel() SocketOption_SocketOptionLevel { - if m != nil && m.Level != nil { - return *m.Level - } - return SocketOption_SOCKET_SOL_IP -} - -func (m *SocketOption) GetOption() SocketOption_SocketOptionName { - if m != nil && m.Option != nil { - return *m.Option - } - return SocketOption_SOCKET_SO_DEBUG -} - -func (m *SocketOption) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type SetSocketOptionsRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetSocketOptionsRequest) Reset() { *m = SetSocketOptionsRequest{} } -func (m *SetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } -func (*SetSocketOptionsRequest) ProtoMessage() {} -func (*SetSocketOptionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{11} -} -func (m *SetSocketOptionsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetSocketOptionsRequest.Unmarshal(m, b) -} -func (m *SetSocketOptionsRequest) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetSocketOptionsRequest.Marshal(b, m, deterministic) -} -func (dst *SetSocketOptionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetSocketOptionsRequest.Merge(dst, src) -} -func (m *SetSocketOptionsRequest) XXX_Size() int { - return xxx_messageInfo_SetSocketOptionsRequest.Size(m) -} -func (m *SetSocketOptionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetSocketOptionsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SetSocketOptionsRequest proto.InternalMessageInfo - -func (m *SetSocketOptionsRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *SetSocketOptionsRequest) GetOptions() []*SocketOption { - if m != nil { - return m.Options - } - return nil -} - -type SetSocketOptionsReply struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetSocketOptionsReply) Reset() { *m = SetSocketOptionsReply{} } -func (m *SetSocketOptionsReply) String() string { return proto.CompactTextString(m) } -func (*SetSocketOptionsReply) ProtoMessage() {} -func (*SetSocketOptionsReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{12} -} -func (m *SetSocketOptionsReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetSocketOptionsReply.Unmarshal(m, b) -} -func (m *SetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetSocketOptionsReply.Marshal(b, m, deterministic) -} -func (dst *SetSocketOptionsReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetSocketOptionsReply.Merge(dst, src) -} -func (m *SetSocketOptionsReply) XXX_Size() int { - return xxx_messageInfo_SetSocketOptionsReply.Size(m) -} -func (m *SetSocketOptionsReply) XXX_DiscardUnknown() { - xxx_messageInfo_SetSocketOptionsReply.DiscardUnknown(m) -} - -var xxx_messageInfo_SetSocketOptionsReply proto.InternalMessageInfo - -type GetSocketOptionsRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetSocketOptionsRequest) Reset() { *m = GetSocketOptionsRequest{} } -func (m *GetSocketOptionsRequest) String() string { return proto.CompactTextString(m) } -func (*GetSocketOptionsRequest) ProtoMessage() {} -func (*GetSocketOptionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{13} -} -func (m *GetSocketOptionsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetSocketOptionsRequest.Unmarshal(m, b) -} -func (m *GetSocketOptionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetSocketOptionsRequest.Marshal(b, m, deterministic) -} -func (dst *GetSocketOptionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSocketOptionsRequest.Merge(dst, src) -} -func (m *GetSocketOptionsRequest) XXX_Size() int { - return xxx_messageInfo_GetSocketOptionsRequest.Size(m) -} -func (m *GetSocketOptionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetSocketOptionsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSocketOptionsRequest 
proto.InternalMessageInfo - -func (m *GetSocketOptionsRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *GetSocketOptionsRequest) GetOptions() []*SocketOption { - if m != nil { - return m.Options - } - return nil -} - -type GetSocketOptionsReply struct { - Options []*SocketOption `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetSocketOptionsReply) Reset() { *m = GetSocketOptionsReply{} } -func (m *GetSocketOptionsReply) String() string { return proto.CompactTextString(m) } -func (*GetSocketOptionsReply) ProtoMessage() {} -func (*GetSocketOptionsReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{14} -} -func (m *GetSocketOptionsReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetSocketOptionsReply.Unmarshal(m, b) -} -func (m *GetSocketOptionsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetSocketOptionsReply.Marshal(b, m, deterministic) -} -func (dst *GetSocketOptionsReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSocketOptionsReply.Merge(dst, src) -} -func (m *GetSocketOptionsReply) XXX_Size() int { - return xxx_messageInfo_GetSocketOptionsReply.Size(m) -} -func (m *GetSocketOptionsReply) XXX_DiscardUnknown() { - xxx_messageInfo_GetSocketOptionsReply.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSocketOptionsReply proto.InternalMessageInfo - -func (m *GetSocketOptionsReply) GetOptions() []*SocketOption { - if m != nil { - return m.Options - } - return nil -} - -type ConnectRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - RemoteIp *AddressPort `protobuf:"bytes,2,req,name=remote_ip,json=remoteIp" json:"remote_ip,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,3,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConnectRequest) Reset() { *m = ConnectRequest{} } -func (m *ConnectRequest) String() string { return proto.CompactTextString(m) } -func (*ConnectRequest) ProtoMessage() {} -func (*ConnectRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{15} -} -func (m *ConnectRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConnectRequest.Unmarshal(m, b) -} -func (m *ConnectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConnectRequest.Marshal(b, m, deterministic) -} -func (dst *ConnectRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConnectRequest.Merge(dst, src) -} -func (m *ConnectRequest) XXX_Size() int { - return xxx_messageInfo_ConnectRequest.Size(m) -} -func (m *ConnectRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ConnectRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ConnectRequest proto.InternalMessageInfo - -const Default_ConnectRequest_TimeoutSeconds float64 = -1 - -func (m *ConnectRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *ConnectRequest) GetRemoteIp() *AddressPort { - if m != nil { - return m.RemoteIp - } - return nil -} - -func (m 
*ConnectRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_ConnectRequest_TimeoutSeconds -} - -type ConnectReply struct { - ProxyExternalIp *AddressPort `protobuf:"bytes,1,opt,name=proxy_external_ip,json=proxyExternalIp" json:"proxy_external_ip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConnectReply) Reset() { *m = ConnectReply{} } -func (m *ConnectReply) String() string { return proto.CompactTextString(m) } -func (*ConnectReply) ProtoMessage() {} -func (*ConnectReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{16} -} - -var extRange_ConnectReply = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*ConnectReply) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ConnectReply -} -func (m *ConnectReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConnectReply.Unmarshal(m, b) -} -func (m *ConnectReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConnectReply.Marshal(b, m, deterministic) -} -func (dst *ConnectReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConnectReply.Merge(dst, src) -} -func (m *ConnectReply) XXX_Size() int { - return xxx_messageInfo_ConnectReply.Size(m) -} -func (m *ConnectReply) XXX_DiscardUnknown() { - xxx_messageInfo_ConnectReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ConnectReply proto.InternalMessageInfo - -func (m *ConnectReply) GetProxyExternalIp() *AddressPort { - if m != nil { - return m.ProxyExternalIp - } - return nil -} - -type ListenRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - Backlog *int32 `protobuf:"varint,2,req,name=backlog" json:"backlog,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListenRequest) Reset() { *m = ListenRequest{} } -func (m *ListenRequest) String() string { return proto.CompactTextString(m) } -func (*ListenRequest) ProtoMessage() {} -func (*ListenRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{17} -} -func (m *ListenRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListenRequest.Unmarshal(m, b) -} -func (m *ListenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListenRequest.Marshal(b, m, deterministic) -} -func (dst *ListenRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListenRequest.Merge(dst, src) -} -func (m *ListenRequest) XXX_Size() int { - return xxx_messageInfo_ListenRequest.Size(m) -} -func (m *ListenRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListenRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListenRequest proto.InternalMessageInfo - -func (m *ListenRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *ListenRequest) GetBacklog() int32 { - if m != nil && m.Backlog != nil { - return *m.Backlog - } - return 0 -} - -type ListenReply struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListenReply) Reset() { *m = ListenReply{} } -func (m *ListenReply) String() string { 
return proto.CompactTextString(m) } -func (*ListenReply) ProtoMessage() {} -func (*ListenReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{18} -} -func (m *ListenReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ListenReply.Unmarshal(m, b) -} -func (m *ListenReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ListenReply.Marshal(b, m, deterministic) -} -func (dst *ListenReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListenReply.Merge(dst, src) -} -func (m *ListenReply) XXX_Size() int { - return xxx_messageInfo_ListenReply.Size(m) -} -func (m *ListenReply) XXX_DiscardUnknown() { - xxx_messageInfo_ListenReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ListenReply proto.InternalMessageInfo - -type AcceptRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AcceptRequest) Reset() { *m = AcceptRequest{} } -func (m *AcceptRequest) String() string { return proto.CompactTextString(m) } -func (*AcceptRequest) ProtoMessage() {} -func (*AcceptRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{19} -} -func (m *AcceptRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AcceptRequest.Unmarshal(m, b) -} -func (m *AcceptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AcceptRequest.Marshal(b, m, deterministic) -} -func (dst *AcceptRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AcceptRequest.Merge(dst, src) -} -func (m *AcceptRequest) XXX_Size() int { - return xxx_messageInfo_AcceptRequest.Size(m) -} -func (m *AcceptRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AcceptRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AcceptRequest proto.InternalMessageInfo - -const Default_AcceptRequest_TimeoutSeconds float64 = -1 - -func (m *AcceptRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *AcceptRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_AcceptRequest_TimeoutSeconds -} - -type AcceptReply struct { - NewSocketDescriptor []byte `protobuf:"bytes,2,opt,name=new_socket_descriptor,json=newSocketDescriptor" json:"new_socket_descriptor,omitempty"` - RemoteAddress *AddressPort `protobuf:"bytes,3,opt,name=remote_address,json=remoteAddress" json:"remote_address,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AcceptReply) Reset() { *m = AcceptReply{} } -func (m *AcceptReply) String() string { return proto.CompactTextString(m) } -func (*AcceptReply) ProtoMessage() {} -func (*AcceptReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{20} -} -func (m *AcceptReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AcceptReply.Unmarshal(m, b) -} -func (m *AcceptReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AcceptReply.Marshal(b, m, deterministic) -} -func (dst *AcceptReply) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_AcceptReply.Merge(dst, src) -} -func (m *AcceptReply) XXX_Size() int { - return xxx_messageInfo_AcceptReply.Size(m) -} -func (m *AcceptReply) XXX_DiscardUnknown() { - xxx_messageInfo_AcceptReply.DiscardUnknown(m) -} - -var xxx_messageInfo_AcceptReply proto.InternalMessageInfo - -func (m *AcceptReply) GetNewSocketDescriptor() []byte { - if m != nil { - return m.NewSocketDescriptor - } - return nil -} - -func (m *AcceptReply) GetRemoteAddress() *AddressPort { - if m != nil { - return m.RemoteAddress - } - return nil -} - -type ShutDownRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - How *ShutDownRequest_How `protobuf:"varint,2,req,name=how,enum=appengine.ShutDownRequest_How" json:"how,omitempty"` - SendOffset *int64 `protobuf:"varint,3,req,name=send_offset,json=sendOffset" json:"send_offset,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ShutDownRequest) Reset() { *m = ShutDownRequest{} } -func (m *ShutDownRequest) String() string { return proto.CompactTextString(m) } -func (*ShutDownRequest) ProtoMessage() {} -func (*ShutDownRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{21} -} -func (m *ShutDownRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ShutDownRequest.Unmarshal(m, b) -} -func (m *ShutDownRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ShutDownRequest.Marshal(b, m, deterministic) -} -func (dst *ShutDownRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ShutDownRequest.Merge(dst, src) -} -func (m *ShutDownRequest) XXX_Size() int { - return xxx_messageInfo_ShutDownRequest.Size(m) -} -func (m *ShutDownRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ShutDownRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ShutDownRequest proto.InternalMessageInfo - -func (m *ShutDownRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *ShutDownRequest) GetHow() ShutDownRequest_How { - if m != nil && m.How != nil { - return *m.How - } - return ShutDownRequest_SOCKET_SHUT_RD -} - -func (m *ShutDownRequest) GetSendOffset() int64 { - if m != nil && m.SendOffset != nil { - return *m.SendOffset - } - return 0 -} - -type ShutDownReply struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ShutDownReply) Reset() { *m = ShutDownReply{} } -func (m *ShutDownReply) String() string { return proto.CompactTextString(m) } -func (*ShutDownReply) ProtoMessage() {} -func (*ShutDownReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{22} -} -func (m *ShutDownReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ShutDownReply.Unmarshal(m, b) -} -func (m *ShutDownReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ShutDownReply.Marshal(b, m, deterministic) -} -func (dst *ShutDownReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ShutDownReply.Merge(dst, src) -} -func (m *ShutDownReply) XXX_Size() int { - return xxx_messageInfo_ShutDownReply.Size(m) -} -func (m *ShutDownReply) XXX_DiscardUnknown() { - xxx_messageInfo_ShutDownReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ShutDownReply 
proto.InternalMessageInfo - -type CloseRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - SendOffset *int64 `protobuf:"varint,2,opt,name=send_offset,json=sendOffset,def=-1" json:"send_offset,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CloseRequest) Reset() { *m = CloseRequest{} } -func (m *CloseRequest) String() string { return proto.CompactTextString(m) } -func (*CloseRequest) ProtoMessage() {} -func (*CloseRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{23} -} -func (m *CloseRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CloseRequest.Unmarshal(m, b) -} -func (m *CloseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CloseRequest.Marshal(b, m, deterministic) -} -func (dst *CloseRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CloseRequest.Merge(dst, src) -} -func (m *CloseRequest) XXX_Size() int { - return xxx_messageInfo_CloseRequest.Size(m) -} -func (m *CloseRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CloseRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CloseRequest proto.InternalMessageInfo - -const Default_CloseRequest_SendOffset int64 = -1 - -func (m *CloseRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *CloseRequest) GetSendOffset() int64 { - if m != nil && m.SendOffset != nil { - return *m.SendOffset - } - return Default_CloseRequest_SendOffset -} - -type CloseReply struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CloseReply) Reset() { *m = CloseReply{} } -func (m *CloseReply) String() string { return proto.CompactTextString(m) } -func (*CloseReply) ProtoMessage() {} -func (*CloseReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{24} -} -func (m *CloseReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CloseReply.Unmarshal(m, b) -} -func (m *CloseReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CloseReply.Marshal(b, m, deterministic) -} -func (dst *CloseReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_CloseReply.Merge(dst, src) -} -func (m *CloseReply) XXX_Size() int { - return xxx_messageInfo_CloseReply.Size(m) -} -func (m *CloseReply) XXX_DiscardUnknown() { - xxx_messageInfo_CloseReply.DiscardUnknown(m) -} - -var xxx_messageInfo_CloseReply proto.InternalMessageInfo - -type SendRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - Data []byte `protobuf:"bytes,2,req,name=data" json:"data,omitempty"` - StreamOffset *int64 `protobuf:"varint,3,req,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"` - Flags *int32 `protobuf:"varint,4,opt,name=flags,def=0" json:"flags,omitempty"` - SendTo *AddressPort `protobuf:"bytes,5,opt,name=send_to,json=sendTo" json:"send_to,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,6,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SendRequest) Reset() { *m 
= SendRequest{} } -func (m *SendRequest) String() string { return proto.CompactTextString(m) } -func (*SendRequest) ProtoMessage() {} -func (*SendRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{25} -} -func (m *SendRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendRequest.Unmarshal(m, b) -} -func (m *SendRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendRequest.Marshal(b, m, deterministic) -} -func (dst *SendRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendRequest.Merge(dst, src) -} -func (m *SendRequest) XXX_Size() int { - return xxx_messageInfo_SendRequest.Size(m) -} -func (m *SendRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SendRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SendRequest proto.InternalMessageInfo - -const Default_SendRequest_Flags int32 = 0 -const Default_SendRequest_TimeoutSeconds float64 = -1 - -func (m *SendRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *SendRequest) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *SendRequest) GetStreamOffset() int64 { - if m != nil && m.StreamOffset != nil { - return *m.StreamOffset - } - return 0 -} - -func (m *SendRequest) GetFlags() int32 { - if m != nil && m.Flags != nil { - return *m.Flags - } - return Default_SendRequest_Flags -} - -func (m *SendRequest) GetSendTo() *AddressPort { - if m != nil { - return m.SendTo - } - return nil -} - -func (m *SendRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_SendRequest_TimeoutSeconds -} - -type SendReply struct { - DataSent *int32 `protobuf:"varint,1,opt,name=data_sent,json=dataSent" json:"data_sent,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SendReply) Reset() { *m = SendReply{} } -func (m *SendReply) String() string { return proto.CompactTextString(m) } -func (*SendReply) ProtoMessage() {} -func (*SendReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{26} -} -func (m *SendReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SendReply.Unmarshal(m, b) -} -func (m *SendReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SendReply.Marshal(b, m, deterministic) -} -func (dst *SendReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_SendReply.Merge(dst, src) -} -func (m *SendReply) XXX_Size() int { - return xxx_messageInfo_SendReply.Size(m) -} -func (m *SendReply) XXX_DiscardUnknown() { - xxx_messageInfo_SendReply.DiscardUnknown(m) -} - -var xxx_messageInfo_SendReply proto.InternalMessageInfo - -func (m *SendReply) GetDataSent() int32 { - if m != nil && m.DataSent != nil { - return *m.DataSent - } - return 0 -} - -type ReceiveRequest struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - DataSize *int32 `protobuf:"varint,2,req,name=data_size,json=dataSize" json:"data_size,omitempty"` - Flags *int32 `protobuf:"varint,3,opt,name=flags,def=0" json:"flags,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,5,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized 
[]byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReceiveRequest) Reset() { *m = ReceiveRequest{} } -func (m *ReceiveRequest) String() string { return proto.CompactTextString(m) } -func (*ReceiveRequest) ProtoMessage() {} -func (*ReceiveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{27} -} -func (m *ReceiveRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReceiveRequest.Unmarshal(m, b) -} -func (m *ReceiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReceiveRequest.Marshal(b, m, deterministic) -} -func (dst *ReceiveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReceiveRequest.Merge(dst, src) -} -func (m *ReceiveRequest) XXX_Size() int { - return xxx_messageInfo_ReceiveRequest.Size(m) -} -func (m *ReceiveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ReceiveRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ReceiveRequest proto.InternalMessageInfo - -const Default_ReceiveRequest_Flags int32 = 0 -const Default_ReceiveRequest_TimeoutSeconds float64 = -1 - -func (m *ReceiveRequest) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *ReceiveRequest) GetDataSize() int32 { - if m != nil && m.DataSize != nil { - return *m.DataSize - } - return 0 -} - -func (m *ReceiveRequest) GetFlags() int32 { - if m != nil && m.Flags != nil { - return *m.Flags - } - return Default_ReceiveRequest_Flags -} - -func (m *ReceiveRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return Default_ReceiveRequest_TimeoutSeconds -} - -type ReceiveReply struct { - StreamOffset *int64 `protobuf:"varint,2,opt,name=stream_offset,json=streamOffset" json:"stream_offset,omitempty"` - Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` - ReceivedFrom *AddressPort `protobuf:"bytes,4,opt,name=received_from,json=receivedFrom" json:"received_from,omitempty"` - BufferSize *int32 `protobuf:"varint,5,opt,name=buffer_size,json=bufferSize" json:"buffer_size,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReceiveReply) Reset() { *m = ReceiveReply{} } -func (m *ReceiveReply) String() string { return proto.CompactTextString(m) } -func (*ReceiveReply) ProtoMessage() {} -func (*ReceiveReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{28} -} -func (m *ReceiveReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ReceiveReply.Unmarshal(m, b) -} -func (m *ReceiveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ReceiveReply.Marshal(b, m, deterministic) -} -func (dst *ReceiveReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReceiveReply.Merge(dst, src) -} -func (m *ReceiveReply) XXX_Size() int { - return xxx_messageInfo_ReceiveReply.Size(m) -} -func (m *ReceiveReply) XXX_DiscardUnknown() { - xxx_messageInfo_ReceiveReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ReceiveReply proto.InternalMessageInfo - -func (m *ReceiveReply) GetStreamOffset() int64 { - if m != nil && m.StreamOffset != nil { - return *m.StreamOffset - } - return 0 -} - -func (m *ReceiveReply) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *ReceiveReply) GetReceivedFrom() *AddressPort { - if m != nil { - return m.ReceivedFrom - } - return nil 
-} - -func (m *ReceiveReply) GetBufferSize() int32 { - if m != nil && m.BufferSize != nil { - return *m.BufferSize - } - return 0 -} - -type PollEvent struct { - SocketDescriptor *string `protobuf:"bytes,1,req,name=socket_descriptor,json=socketDescriptor" json:"socket_descriptor,omitempty"` - RequestedEvents *int32 `protobuf:"varint,2,req,name=requested_events,json=requestedEvents" json:"requested_events,omitempty"` - ObservedEvents *int32 `protobuf:"varint,3,req,name=observed_events,json=observedEvents" json:"observed_events,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PollEvent) Reset() { *m = PollEvent{} } -func (m *PollEvent) String() string { return proto.CompactTextString(m) } -func (*PollEvent) ProtoMessage() {} -func (*PollEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{29} -} -func (m *PollEvent) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PollEvent.Unmarshal(m, b) -} -func (m *PollEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PollEvent.Marshal(b, m, deterministic) -} -func (dst *PollEvent) XXX_Merge(src proto.Message) { - xxx_messageInfo_PollEvent.Merge(dst, src) -} -func (m *PollEvent) XXX_Size() int { - return xxx_messageInfo_PollEvent.Size(m) -} -func (m *PollEvent) XXX_DiscardUnknown() { - xxx_messageInfo_PollEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_PollEvent proto.InternalMessageInfo - -func (m *PollEvent) GetSocketDescriptor() string { - if m != nil && m.SocketDescriptor != nil { - return *m.SocketDescriptor - } - return "" -} - -func (m *PollEvent) GetRequestedEvents() int32 { - if m != nil && m.RequestedEvents != nil { - return *m.RequestedEvents - } - return 0 -} - -func (m *PollEvent) GetObservedEvents() int32 { - if m != nil && m.ObservedEvents != nil { - return *m.ObservedEvents - } - return 0 -} - -type PollRequest struct { - Events []*PollEvent `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` - TimeoutSeconds *float64 `protobuf:"fixed64,2,opt,name=timeout_seconds,json=timeoutSeconds,def=-1" json:"timeout_seconds,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PollRequest) Reset() { *m = PollRequest{} } -func (m *PollRequest) String() string { return proto.CompactTextString(m) } -func (*PollRequest) ProtoMessage() {} -func (*PollRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{30} -} -func (m *PollRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PollRequest.Unmarshal(m, b) -} -func (m *PollRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PollRequest.Marshal(b, m, deterministic) -} -func (dst *PollRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PollRequest.Merge(dst, src) -} -func (m *PollRequest) XXX_Size() int { - return xxx_messageInfo_PollRequest.Size(m) -} -func (m *PollRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PollRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PollRequest proto.InternalMessageInfo - -const Default_PollRequest_TimeoutSeconds float64 = -1 - -func (m *PollRequest) GetEvents() []*PollEvent { - if m != nil { - return m.Events - } - return nil -} - -func (m *PollRequest) GetTimeoutSeconds() float64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return 
Default_PollRequest_TimeoutSeconds -} - -type PollReply struct { - Events []*PollEvent `protobuf:"bytes,2,rep,name=events" json:"events,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PollReply) Reset() { *m = PollReply{} } -func (m *PollReply) String() string { return proto.CompactTextString(m) } -func (*PollReply) ProtoMessage() {} -func (*PollReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{31} -} -func (m *PollReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PollReply.Unmarshal(m, b) -} -func (m *PollReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PollReply.Marshal(b, m, deterministic) -} -func (dst *PollReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_PollReply.Merge(dst, src) -} -func (m *PollReply) XXX_Size() int { - return xxx_messageInfo_PollReply.Size(m) -} -func (m *PollReply) XXX_DiscardUnknown() { - xxx_messageInfo_PollReply.DiscardUnknown(m) -} - -var xxx_messageInfo_PollReply proto.InternalMessageInfo - -func (m *PollReply) GetEvents() []*PollEvent { - if m != nil { - return m.Events - } - return nil -} - -type ResolveRequest struct { - Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"` - AddressFamilies []CreateSocketRequest_SocketFamily `protobuf:"varint,2,rep,name=address_families,json=addressFamilies,enum=appengine.CreateSocketRequest_SocketFamily" json:"address_families,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResolveRequest) Reset() { *m = ResolveRequest{} } -func (m *ResolveRequest) String() string { return proto.CompactTextString(m) } -func (*ResolveRequest) ProtoMessage() {} -func (*ResolveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{32} -} -func (m *ResolveRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResolveRequest.Unmarshal(m, b) -} -func (m *ResolveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResolveRequest.Marshal(b, m, deterministic) -} -func (dst *ResolveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResolveRequest.Merge(dst, src) -} -func (m *ResolveRequest) XXX_Size() int { - return xxx_messageInfo_ResolveRequest.Size(m) -} -func (m *ResolveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ResolveRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ResolveRequest proto.InternalMessageInfo - -func (m *ResolveRequest) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *ResolveRequest) GetAddressFamilies() []CreateSocketRequest_SocketFamily { - if m != nil { - return m.AddressFamilies - } - return nil -} - -type ResolveReply struct { - PackedAddress [][]byte `protobuf:"bytes,2,rep,name=packed_address,json=packedAddress" json:"packed_address,omitempty"` - CanonicalName *string `protobuf:"bytes,3,opt,name=canonical_name,json=canonicalName" json:"canonical_name,omitempty"` - Aliases []string `protobuf:"bytes,4,rep,name=aliases" json:"aliases,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResolveReply) Reset() { *m = ResolveReply{} } -func (m *ResolveReply) String() string { return proto.CompactTextString(m) } -func (*ResolveReply) ProtoMessage() {} -func 
(*ResolveReply) Descriptor() ([]byte, []int) { - return fileDescriptor_socket_service_b5f8f233dc327808, []int{33} -} -func (m *ResolveReply) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ResolveReply.Unmarshal(m, b) -} -func (m *ResolveReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ResolveReply.Marshal(b, m, deterministic) -} -func (dst *ResolveReply) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResolveReply.Merge(dst, src) -} -func (m *ResolveReply) XXX_Size() int { - return xxx_messageInfo_ResolveReply.Size(m) -} -func (m *ResolveReply) XXX_DiscardUnknown() { - xxx_messageInfo_ResolveReply.DiscardUnknown(m) -} - -var xxx_messageInfo_ResolveReply proto.InternalMessageInfo - -func (m *ResolveReply) GetPackedAddress() [][]byte { - if m != nil { - return m.PackedAddress - } - return nil -} - -func (m *ResolveReply) GetCanonicalName() string { - if m != nil && m.CanonicalName != nil { - return *m.CanonicalName - } - return "" -} - -func (m *ResolveReply) GetAliases() []string { - if m != nil { - return m.Aliases - } - return nil -} - -func init() { - proto.RegisterType((*RemoteSocketServiceError)(nil), "appengine.RemoteSocketServiceError") - proto.RegisterType((*AddressPort)(nil), "appengine.AddressPort") - proto.RegisterType((*CreateSocketRequest)(nil), "appengine.CreateSocketRequest") - proto.RegisterType((*CreateSocketReply)(nil), "appengine.CreateSocketReply") - proto.RegisterType((*BindRequest)(nil), "appengine.BindRequest") - proto.RegisterType((*BindReply)(nil), "appengine.BindReply") - proto.RegisterType((*GetSocketNameRequest)(nil), "appengine.GetSocketNameRequest") - proto.RegisterType((*GetSocketNameReply)(nil), "appengine.GetSocketNameReply") - proto.RegisterType((*GetPeerNameRequest)(nil), "appengine.GetPeerNameRequest") - proto.RegisterType((*GetPeerNameReply)(nil), "appengine.GetPeerNameReply") - proto.RegisterType((*SocketOption)(nil), "appengine.SocketOption") - proto.RegisterType((*SetSocketOptionsRequest)(nil), "appengine.SetSocketOptionsRequest") - proto.RegisterType((*SetSocketOptionsReply)(nil), "appengine.SetSocketOptionsReply") - proto.RegisterType((*GetSocketOptionsRequest)(nil), "appengine.GetSocketOptionsRequest") - proto.RegisterType((*GetSocketOptionsReply)(nil), "appengine.GetSocketOptionsReply") - proto.RegisterType((*ConnectRequest)(nil), "appengine.ConnectRequest") - proto.RegisterType((*ConnectReply)(nil), "appengine.ConnectReply") - proto.RegisterType((*ListenRequest)(nil), "appengine.ListenRequest") - proto.RegisterType((*ListenReply)(nil), "appengine.ListenReply") - proto.RegisterType((*AcceptRequest)(nil), "appengine.AcceptRequest") - proto.RegisterType((*AcceptReply)(nil), "appengine.AcceptReply") - proto.RegisterType((*ShutDownRequest)(nil), "appengine.ShutDownRequest") - proto.RegisterType((*ShutDownReply)(nil), "appengine.ShutDownReply") - proto.RegisterType((*CloseRequest)(nil), "appengine.CloseRequest") - proto.RegisterType((*CloseReply)(nil), "appengine.CloseReply") - proto.RegisterType((*SendRequest)(nil), "appengine.SendRequest") - proto.RegisterType((*SendReply)(nil), "appengine.SendReply") - proto.RegisterType((*ReceiveRequest)(nil), "appengine.ReceiveRequest") - proto.RegisterType((*ReceiveReply)(nil), "appengine.ReceiveReply") - proto.RegisterType((*PollEvent)(nil), "appengine.PollEvent") - proto.RegisterType((*PollRequest)(nil), "appengine.PollRequest") - proto.RegisterType((*PollReply)(nil), "appengine.PollReply") - proto.RegisterType((*ResolveRequest)(nil), 
"appengine.ResolveRequest") - proto.RegisterType((*ResolveReply)(nil), "appengine.ResolveReply") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/socket/socket_service.proto", fileDescriptor_socket_service_b5f8f233dc327808) -} - -var fileDescriptor_socket_service_b5f8f233dc327808 = []byte{ - // 3088 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0x5f, 0x77, 0xe3, 0xc6, - 0x75, 0x37, 0x48, 0xfd, 0xe3, 0x90, 0x94, 0xee, 0x62, 0xa5, 0x5d, 0x25, 0x6e, 0x12, 0x05, 0x8e, - 0x1b, 0x25, 0x8e, 0x77, 0x6d, 0x39, 0x4d, 0x9b, 0xa4, 0x49, 0x16, 0x04, 0x86, 0x24, 0x4c, 0x00, - 0x03, 0xcd, 0x0c, 0x25, 0xd1, 0x6d, 0x8a, 0xd0, 0x22, 0xa4, 0x65, 0x4c, 0x11, 0x0c, 0xc9, 0xdd, - 0xf5, 0xba, 0x69, 0xaa, 0xfe, 0x39, 0xfd, 0x12, 0x7d, 0xe8, 0x73, 0x3f, 0x43, 0x4f, 0x4f, 0x5f, - 0xfa, 0xec, 0xc7, 0x7e, 0x84, 0x9e, 0xbe, 0xb4, 0x9f, 0xa1, 0x67, 0x06, 0xe0, 0x60, 0xc8, 0xd5, - 0xae, 0x77, 0x75, 0x72, 0x4e, 0x9e, 0xa4, 0xfb, 0xbb, 0x77, 0xee, 0xff, 0x99, 0xb9, 0x03, 0xa2, - 0x47, 0x97, 0x69, 0x7a, 0x39, 0x4a, 0x1e, 0x5c, 0xa6, 0xa3, 0xfe, 0xf8, 0xf2, 0x41, 0x3a, 0xbd, - 0x7c, 0xd8, 0x9f, 0x4c, 0x92, 0xf1, 0xe5, 0x70, 0x9c, 0x3c, 0x1c, 0x8e, 0xe7, 0xc9, 0x74, 0xdc, - 0x1f, 0x3d, 0x9c, 0xa5, 0xe7, 0x9f, 0x25, 0xf3, 0xfc, 0x4f, 0x3c, 0x4b, 0xa6, 0x4f, 0x87, 0xe7, - 0xc9, 0x83, 0xc9, 0x34, 0x9d, 0xa7, 0x66, 0x45, 0xc9, 0x5b, 0xff, 0xbc, 0x8b, 0xf6, 0x69, 0x72, - 0x95, 0xce, 0x13, 0x26, 0x25, 0x59, 0x26, 0x88, 0xa7, 0xd3, 0x74, 0x6a, 0x7e, 0x07, 0xd5, 0x66, - 0xcf, 0x67, 0xf3, 0xe4, 0x2a, 0x4e, 0x04, 0xbd, 0x6f, 0x1c, 0x18, 0x87, 0xeb, 0x3f, 0x31, 0x3e, - 0xa0, 0xd5, 0x0c, 0xce, 0xa4, 0xbe, 0x8d, 0x6a, 0x92, 0x1d, 0x0f, 0x92, 0x79, 0x7f, 0x38, 0xda, - 0x2f, 0x1d, 0x18, 0x87, 0x15, 0x5a, 0x95, 0x98, 0x2b, 0x21, 0xeb, 0x73, 0x54, 0x91, 0xb2, 0x4e, - 0x3a, 0x48, 0x4c, 0x40, 0x35, 0xd6, 0x63, 0x1c, 0x07, 0x31, 0xa6, 0x94, 0x50, 0x30, 0xcc, 0x3a, - 0xaa, 0xb4, 0x6c, 0x2f, 0x27, 0x4b, 0x66, 0x15, 0x6d, 0x36, 0x6d, 0xcf, 0xef, 0x52, 0x0c, 0x6b, - 0xe6, 0x1e, 0xba, 0x13, 0x61, 0x1a, 0x78, 0x8c, 0x79, 0x24, 0x8c, 0x5d, 0x1c, 0x7a, 0xd8, 0x85, - 0x75, 0xf3, 0x2e, 0xda, 0xf1, 0xc2, 0x13, 0xdb, 0xf7, 0xdc, 0x98, 0xe2, 0xe3, 0x2e, 0x66, 0x1c, - 0x36, 0xcc, 0x3b, 0xa8, 0xce, 0x88, 0xd3, 0xc1, 0x3c, 0x76, 0x7c, 0xc2, 0xb0, 0x0b, 0x9b, 0xd6, - 0xbf, 0x99, 0xa8, 0xca, 0x34, 0x67, 0x77, 0x50, 0x95, 0xf5, 0x58, 0xcc, 0xba, 0x8e, 0x83, 0x19, - 0x83, 0xb7, 0x84, 0x6d, 0x01, 0x60, 0x61, 0x04, 0x0c, 0x73, 0x1b, 0x21, 0x49, 0x86, 0x04, 0x87, - 0x1c, 0x4a, 0x8a, 0xcd, 0xa8, 0xd3, 0x86, 0xb2, 0x22, 0xbd, 0x90, 0x53, 0x58, 0x13, 0x9e, 0x66, - 0x24, 0x81, 0x75, 0xc5, 0x0b, 0xcf, 0x3c, 0x02, 0x1b, 0x8a, 0x3c, 0x6a, 0x78, 0x2d, 0xd8, 0x5c, - 0x18, 0x16, 0x8a, 0xcf, 0xb0, 0x03, 0x5b, 0x8a, 0xdf, 0xb0, 0xdd, 0x26, 0x54, 0x94, 0x61, 0xa7, - 0xed, 0xf9, 0x2e, 0x20, 0x45, 0xdb, 0x2d, 0xdb, 0x0b, 0xa1, 0x2a, 0x02, 0x96, 0xf4, 0x29, 0xe9, - 0xfa, 0x6e, 0xc3, 0x27, 0x4e, 0x07, 0xaa, 0x9a, 0xb7, 0x01, 0x0e, 0xa0, 0x56, 0x2c, 0x12, 0xd1, - 0x41, 0x5d, 0xd1, 0x4d, 0xbb, 0xeb, 0x73, 0xd8, 0xd6, 0x9c, 0xe0, 0x0d, 0xbf, 0x03, 0x3b, 0x85, - 0x13, 0x5d, 0xd6, 0x03, 0x50, 0xf2, 0xf8, 0xcc, 0x63, 0x1c, 0xee, 0x28, 0xf6, 0x99, 0x8b, 0x4f, - 0xc0, 0xd4, 0xcc, 0x09, 0xfa, 0xae, 0xae, 0xce, 0xf5, 0x28, 0xec, 0x2a, 0x01, 0x8f, 0x09, 0x7a, - 0xaf, 0xa0, 0x45, 0xa9, 0xe0, 0x5e, 0xa1, 0xa0, 0xe9, 0xf9, 0x18, 0xee, 0x2b, 0x3a, 0x90, 0xf4, - 0xbe, 0x66, 0x80, 0xf3, 0x1e, 0x7c, 0x4d, 0x19, 0xe0, 0x67, 0xbc, 0xc1, 0x7a, 0xf0, 0x75, 0xe5, - 0x50, 0x53, 0x24, 0xf5, 0x6d, 0x4d, 0x9e, 
0x45, 0x0e, 0xfc, 0x91, 0xa2, 0x59, 0xe4, 0x45, 0x18, - 0xbe, 0xa1, 0xc4, 0x29, 0x69, 0x32, 0xf8, 0x66, 0x61, 0xce, 0xf7, 0xc2, 0x0e, 0x7c, 0xab, 0xa8, - 0xbd, 0x90, 0x3e, 0x30, 0x6b, 0x68, 0x4b, 0x92, 0x2e, 0x09, 0xe0, 0xdb, 0x4a, 0x98, 0xda, 0x61, - 0x0b, 0x83, 0xa5, 0x7c, 0x71, 0xb1, 0xed, 0xfa, 0x1d, 0x78, 0x47, 0x76, 0x9b, 0x02, 0x44, 0x3d, - 0xde, 0x31, 0x77, 0x11, 0x64, 0xfe, 0xd8, 0x01, 0xe6, 0x84, 0xf8, 0x24, 0x6c, 0xc1, 0x77, 0x34, - 0x2f, 0x7d, 0xa7, 0x03, 0xef, 0xea, 0x5e, 0xf7, 0x18, 0xfc, 0xb1, 0x52, 0x14, 0x12, 0x8e, 0x83, - 0x88, 0xf7, 0xe0, 0xbb, 0xca, 0x33, 0x9f, 0x90, 0x08, 0x0e, 0xf5, 0x3a, 0xb3, 0x16, 0x7c, 0xbf, - 0x68, 0x43, 0x97, 0x06, 0xf0, 0x9e, 0xd6, 0x3b, 0x34, 0x6c, 0xc1, 0x0f, 0xf2, 0x1d, 0x16, 0x63, - 0xff, 0x28, 0x64, 0xbd, 0xd0, 0x81, 0xf7, 0x95, 0x84, 0xff, 0x51, 0xdb, 0xe7, 0xf0, 0x40, 0xa3, - 0x29, 0xe3, 0xf0, 0xb0, 0xa0, 0x43, 0xa1, 0xe1, 0x03, 0x15, 0x6c, 0x37, 0xb4, 0xb9, 0xd3, 0x86, - 0x0f, 0x35, 0x0f, 0x1c, 0xe6, 0xc1, 0x51, 0xb1, 0xe0, 0x48, 0x28, 0xfc, 0x48, 0xef, 0x66, 0x0c, - 0x3f, 0xd4, 0x49, 0x0a, 0x7f, 0xa2, 0xa4, 0xcf, 0x9a, 0x5d, 0xdf, 0x87, 0x1f, 0x69, 0xda, 0xec, - 0x90, 0xc0, 0x9f, 0x2a, 0x73, 0x42, 0xfc, 0xd8, 0x81, 0x3f, 0xd3, 0x01, 0xe6, 0x73, 0xf8, 0xb1, - 0x5a, 0xd1, 0x68, 0x92, 0x90, 0xc3, 0x4f, 0xf5, 0x1c, 0x72, 0x0a, 0x7f, 0xae, 0xb5, 0xa2, 0x6b, - 0x73, 0x1b, 0x7e, 0xa6, 0x3c, 0xe0, 0x5e, 0x80, 0xe1, 0xe7, 0xc5, 0xe6, 0x24, 0x8c, 0xc2, 0x2f, - 0xb4, 0xe5, 0x21, 0xe6, 0xf0, 0x48, 0xa3, 0xa3, 0x4e, 0x0b, 0x6c, 0xa5, 0x8e, 0xe2, 0x80, 0x70, - 0x0c, 0x0d, 0x4d, 0xbf, 0xec, 0x1d, 0x47, 0x35, 0x8b, 0xed, 0x9e, 0x80, 0x5b, 0x34, 0x1e, 0x0d, - 0x42, 0x0e, 0x58, 0x99, 0x73, 0x48, 0x10, 0x40, 0x53, 0xb1, 0x23, 0x4a, 0x38, 0x81, 0x96, 0xaa, - 0x78, 0xd0, 0xf5, 0xb9, 0xd7, 0x26, 0x11, 0xb4, 0x8b, 0xf6, 0x22, 0xdc, 0x25, 0x1c, 0x3c, 0x3d, - 0x05, 0xa2, 0xe8, 0x1f, 0xab, 0x45, 0xe4, 0x04, 0xd3, 0xa6, 0x4f, 0x4e, 0xa1, 0xa3, 0x0a, 0x1d, - 0x12, 0xde, 0x0d, 0xbd, 0x63, 0xf0, 0x8b, 0x3c, 0xd9, 0x6e, 0xd3, 0x85, 0x40, 0x0f, 0xc4, 0x69, - 0xb7, 0x20, 0x54, 0x80, 0xef, 0x35, 0x6c, 0xc7, 0x01, 0xa2, 0x03, 0x0d, 0xdb, 0x85, 0x48, 0x07, - 0x98, 0x13, 0xc2, 0xb1, 0x0e, 0x04, 0xf6, 0x19, 0xd0, 0xa2, 0xbf, 0xbc, 0x86, 0x3c, 0xcc, 0x58, - 0xb1, 0xd1, 0x7d, 0x86, 0x8f, 0x81, 0x2b, 0x09, 0x8a, 0x19, 0xb7, 0x29, 0x87, 0xae, 0x42, 0x18, - 0xa7, 0x72, 0xbb, 0x9d, 0xa8, 0x35, 0x5d, 0x86, 0x29, 0x83, 0x53, 0x3d, 0x18, 0x71, 0x8a, 0xc3, - 0x99, 0xda, 0x4e, 0xae, 0xd0, 0xe2, 0xba, 0x94, 0xe2, 0x63, 0xe8, 0x29, 0xb9, 0x80, 0xb5, 0x98, - 0xf7, 0x09, 0x86, 0x4f, 0x4c, 0x13, 0x6d, 0x17, 0xe9, 0xe5, 0xbd, 0x08, 0xc3, 0x5f, 0xa8, 0xf3, - 0x32, 0x24, 0x12, 0x25, 0x11, 0x87, 0xbf, 0x34, 0xef, 0xa3, 0xbb, 0x85, 0x60, 0x48, 0x58, 0x37, - 0x8a, 0x08, 0xe5, 0xf0, 0x4b, 0xc5, 0x10, 0x86, 0x79, 0xc1, 0xf8, 0x2b, 0xa5, 0x9a, 0x44, 0xc2, - 0xad, 0x6e, 0x14, 0x41, 0xac, 0x1f, 0x7b, 0xac, 0x2b, 0x80, 0x85, 0x9f, 0x51, 0xb3, 0x58, 0xfa, - 0x2b, 0x85, 0xda, 0x1a, 0xda, 0x57, 0x0a, 0x45, 0x3c, 0x5e, 0xd8, 0x65, 0x18, 0x3e, 0x15, 0x77, - 0x9c, 0xc2, 0x42, 0xc2, 0xed, 0x13, 0xdb, 0xf3, 0xe1, 0xbc, 0x48, 0x08, 0xe6, 0x2e, 0x39, 0x0d, - 0x61, 0x50, 0x04, 0x85, 0x79, 0x37, 0xa4, 0xd8, 0x76, 0xda, 0x90, 0x14, 0xc7, 0x07, 0xe6, 0x14, - 0x33, 0xcc, 0xe1, 0x42, 0x99, 0x76, 0x48, 0x18, 0xda, 0x0d, 0x42, 0x39, 0x76, 0xe1, 0x52, 0x99, - 0x16, 0x68, 0x26, 0xf9, 0x58, 0x8b, 0xa5, 0xd1, 0x6d, 0x32, 0x18, 0x2a, 0xc0, 0x63, 0x42, 0x0c, - 0x7e, 0xad, 0x97, 0x45, 0x22, 0x9f, 0x29, 0x83, 0xac, 0xdd, 0xcd, 0x1c, 0x1b, 0x29, 0x83, 0x9c, - 0x90, 0xc0, 0x0e, 0x7b, 0x14, 0x37, 0x19, 0x5c, 0x29, 0x41, 0xb1, 
0x07, 0x5d, 0xd2, 0xe5, 0x30, - 0x5e, 0xf2, 0x8c, 0xe2, 0x66, 0x57, 0xdc, 0xd2, 0xa9, 0x12, 0x6c, 0x13, 0x96, 0x69, 0x9c, 0x28, - 0x41, 0x01, 0x2d, 0x62, 0xfd, 0x8d, 0x72, 0xc6, 0xf6, 0x29, 0xb6, 0xdd, 0x1e, 0x4c, 0x55, 0x4a, - 0xbc, 0x30, 0xa2, 0xa4, 0x45, 0xc5, 0xa5, 0x3e, 0x2b, 0xb6, 0x23, 0xb7, 0x7d, 0x0c, 0xf3, 0xe2, - 0x38, 0x73, 0x7c, 0x6c, 0x87, 0xf0, 0x44, 0x2f, 0x61, 0x68, 0x07, 0xf0, 0xb4, 0x00, 0xb2, 0xe4, - 0x3f, 0xd3, 0xae, 0x32, 0x21, 0xf0, 0xb9, 0x72, 0x31, 0x3b, 0x11, 0x3c, 0x02, 0xcf, 0x95, 0x88, - 0x7b, 0xdc, 0x25, 0x1c, 0xbe, 0xd0, 0xce, 0xf1, 0x00, 0xbb, 0x5e, 0x37, 0x80, 0xbf, 0x56, 0xde, - 0x65, 0x80, 0x6c, 0xcd, 0xdf, 0x2a, 0x39, 0xc7, 0x0e, 0x1d, 0xec, 0x63, 0x17, 0xfe, 0x46, 0x3b, - 0x7f, 0x3a, 0xb8, 0x07, 0xbf, 0x53, 0xeb, 0x3a, 0xb8, 0x87, 0xcf, 0x22, 0x8f, 0x62, 0x17, 0xfe, - 0xd6, 0xdc, 0x2d, 0x40, 0x8a, 0x4f, 0x48, 0x07, 0xbb, 0x70, 0x6d, 0x98, 0x7b, 0x79, 0xa2, 0x24, - 0xfa, 0x31, 0x76, 0x44, 0xad, 0xff, 0xce, 0x30, 0xef, 0x2e, 0x1a, 0xf7, 0x34, 0xc4, 0x54, 0x5c, - 0x51, 0xf0, 0xf7, 0x86, 0xb9, 0x9f, 0xb7, 0x79, 0x48, 0x38, 0xc5, 0x8e, 0x38, 0x48, 0xec, 0x86, - 0x8f, 0xe1, 0x1f, 0x0c, 0x13, 0x16, 0xe7, 0x44, 0xb3, 0xe3, 0xf9, 0x3e, 0xfc, 0xa3, 0xf1, 0xf5, - 0x12, 0x18, 0xd6, 0x15, 0xaa, 0xda, 0x83, 0xc1, 0x34, 0x99, 0xcd, 0xa2, 0x74, 0x3a, 0x37, 0x4d, - 0xb4, 0x36, 0x49, 0xa7, 0xf3, 0x7d, 0xe3, 0xa0, 0x74, 0xb8, 0x4e, 0xe5, 0xff, 0xe6, 0xbb, 0x68, - 0x7b, 0xd2, 0x3f, 0xff, 0x2c, 0x19, 0xc4, 0xfd, 0x4c, 0x52, 0xce, 0x7f, 0x35, 0x5a, 0xcf, 0xd0, - 0x7c, 0xb9, 0xf9, 0x0e, 0xaa, 0x3f, 0x4e, 0x67, 0xf3, 0x71, 0xff, 0x2a, 0x89, 0x1f, 0x0f, 0xc7, - 0xf3, 0xfd, 0xb2, 0x9c, 0x12, 0x6b, 0x0b, 0xb0, 0x3d, 0x1c, 0xcf, 0xad, 0x7f, 0x5a, 0x43, 0x77, - 0x9d, 0x69, 0xd2, 0x5f, 0x0c, 0xa3, 0x34, 0xf9, 0xcd, 0x93, 0x64, 0x36, 0x37, 0x1d, 0xb4, 0x71, - 0xd1, 0xbf, 0x1a, 0x8e, 0x9e, 0x4b, 0xcb, 0xdb, 0x47, 0xef, 0x3d, 0x50, 0x03, 0xec, 0x83, 0x1b, - 0xe4, 0x1f, 0x64, 0x54, 0x53, 0x2e, 0xa1, 0xf9, 0x52, 0xd3, 0x43, 0x5b, 0x72, 0xfa, 0x3d, 0x4f, - 0xc5, 0x88, 0x2a, 0xd4, 0xbc, 0xff, 0x5a, 0x6a, 0xa2, 0x7c, 0x11, 0x55, 0xcb, 0xcd, 0x9f, 0xa3, - 0xed, 0x7c, 0xae, 0x4e, 0x27, 0xf3, 0x61, 0x3a, 0x9e, 0xed, 0x97, 0x0f, 0xca, 0x87, 0xd5, 0xa3, - 0xfb, 0x9a, 0xc2, 0x6c, 0x31, 0x91, 0x7c, 0x5a, 0x9f, 0x69, 0xd4, 0xcc, 0x6c, 0xa0, 0x3b, 0x93, - 0x69, 0xfa, 0xf9, 0xf3, 0x38, 0xf9, 0x3c, 0x9b, 0xd6, 0xe3, 0xe1, 0x64, 0x7f, 0xed, 0xc0, 0x38, - 0xac, 0x1e, 0xdd, 0xd3, 0x54, 0x68, 0xa9, 0xa7, 0x3b, 0x72, 0x01, 0xce, 0xe5, 0xbd, 0x89, 0x79, - 0x88, 0xb6, 0x47, 0xc3, 0xd9, 0x3c, 0x19, 0xc7, 0x9f, 0xf6, 0xcf, 0x3f, 0x1b, 0xa5, 0x97, 0xfb, - 0xeb, 0x8b, 0xe9, 0xbc, 0x9e, 0x31, 0x1a, 0x19, 0x6e, 0x7e, 0x84, 0x2a, 0x53, 0x39, 0xe1, 0x0b, - 0x2b, 0x1b, 0xaf, 0xb4, 0xb2, 0x95, 0x09, 0x7a, 0x13, 0x73, 0x0f, 0x6d, 0xf4, 0x27, 0x93, 0x78, - 0x38, 0xd8, 0xaf, 0xc8, 0x42, 0xad, 0xf7, 0x27, 0x13, 0x6f, 0x60, 0x7e, 0x03, 0xa1, 0xc9, 0x34, - 0xfd, 0x75, 0x72, 0x3e, 0x17, 0x2c, 0x74, 0x60, 0x1c, 0x96, 0x69, 0x25, 0x47, 0xbc, 0x81, 0x65, - 0xa1, 0x9a, 0x9e, 0x7b, 0x73, 0x0b, 0xad, 0x79, 0xd1, 0xd3, 0x1f, 0x82, 0x91, 0xff, 0xf7, 0x23, - 0x28, 0x59, 0x16, 0xda, 0x5e, 0x4e, 0xac, 0xb9, 0x89, 0xca, 0xdc, 0x89, 0xc0, 0x10, 0xff, 0x74, - 0xdd, 0x08, 0x4a, 0xd6, 0x97, 0x06, 0xba, 0xb3, 0x5c, 0x91, 0xc9, 0xe8, 0xb9, 0xf9, 0x1e, 0xba, - 0x93, 0xa7, 0x7d, 0x90, 0xcc, 0xce, 0xa7, 0xc3, 0xc9, 0x3c, 0x7f, 0x93, 0x54, 0x28, 0x64, 0x0c, - 0x57, 0xe1, 0xe6, 0xcf, 0xd0, 0xb6, 0x78, 0xf4, 0x24, 0x53, 0xd5, 0x97, 0xe5, 0x57, 0x86, 0x5e, - 0xcf, 0xa4, 0x17, 0xfd, 0xfa, 0x7b, 0x28, 0xd1, 0xf7, 0x2b, 0x5b, 0xff, 0xb3, 0x09, 0xd7, 
0xd7, - 0xd7, 0xd7, 0x25, 0xeb, 0x77, 0xa8, 0xda, 0x18, 0x8e, 0x07, 0x8b, 0x86, 0x7e, 0x49, 0x24, 0xa5, - 0x1b, 0x23, 0xb9, 0xd1, 0x15, 0xd1, 0xc1, 0xaf, 0xef, 0x8a, 0x45, 0x50, 0x25, 0xb3, 0x2f, 0xf2, - 0x78, 0xa3, 0x42, 0xe3, 0x8d, 0x62, 0xb3, 0x1c, 0xb4, 0xdb, 0x4a, 0xe6, 0x59, 0x75, 0xc2, 0xfe, - 0x55, 0x72, 0x9b, 0xc8, 0xac, 0x33, 0x64, 0xae, 0x28, 0x79, 0xa9, 0x7b, 0xa5, 0x37, 0x73, 0xcf, - 0x96, 0x9a, 0xa3, 0x24, 0x99, 0xde, 0xda, 0x39, 0x07, 0xc1, 0x92, 0x0a, 0xe1, 0xda, 0x43, 0xb4, - 0x39, 0x49, 0x92, 0xe9, 0x57, 0x3b, 0xb4, 0x21, 0xc4, 0xbc, 0x89, 0xf5, 0xe5, 0xe6, 0x62, 0x47, - 0x64, 0x7b, 0xdf, 0xfc, 0x05, 0x5a, 0x1f, 0x25, 0x4f, 0x93, 0x51, 0x7e, 0x92, 0x7d, 0xef, 0x25, - 0x27, 0xc6, 0x12, 0xe1, 0x8b, 0x05, 0x34, 0x5b, 0x67, 0x3e, 0x42, 0x1b, 0xd9, 0xa1, 0x93, 0x1f, - 0x62, 0x87, 0xaf, 0xa3, 0x41, 0x46, 0x90, 0xaf, 0x33, 0x77, 0xd1, 0xfa, 0xd3, 0xfe, 0xe8, 0x49, - 0xb2, 0x5f, 0x3e, 0x28, 0x1d, 0xd6, 0x68, 0x46, 0x58, 0x09, 0xba, 0xf3, 0x82, 0x4d, 0xed, 0x41, - 0xcd, 0x88, 0x1f, 0x7b, 0x11, 0xbc, 0x25, 0x67, 0x95, 0x02, 0xca, 0xfe, 0x05, 0x43, 0xce, 0x16, - 0x05, 0x2c, 0xb6, 0xf3, 0xc6, 0x0a, 0x26, 0x76, 0xf6, 0x1d, 0xeb, 0xdf, 0xd7, 0x11, 0xac, 0x7a, - 0x26, 0x6f, 0xbb, 0x85, 0x60, 0xec, 0xe2, 0x46, 0xb7, 0x05, 0x86, 0x1c, 0xc9, 0x14, 0x48, 0xc5, - 0x94, 0x28, 0xc6, 0x23, 0x28, 0x2d, 0xa9, 0x8d, 0xe5, 0x95, 0x5a, 0x5e, 0xd6, 0x90, 0x7d, 0x47, - 0x58, 0x5b, 0xd6, 0xe0, 0x92, 0x90, 0x53, 0xd2, 0xe5, 0x18, 0xd6, 0x97, 0x19, 0x0d, 0x4a, 0x6c, - 0xd7, 0xb1, 0xe5, 0x07, 0x04, 0x31, 0x74, 0x28, 0x06, 0x0b, 0xdd, 0x46, 0xb7, 0x09, 0x9b, 0xcb, - 0x28, 0x75, 0x4e, 0x04, 0xba, 0xb5, 0xac, 0xa4, 0x83, 0x71, 0x64, 0xfb, 0xde, 0x09, 0x86, 0xca, - 0x32, 0x83, 0x90, 0x86, 0x17, 0xfa, 0x5e, 0x88, 0x01, 0x2d, 0xeb, 0xf1, 0xbd, 0xb0, 0x85, 0x29, - 0xd4, 0xcd, 0x7b, 0xc8, 0x5c, 0xd2, 0x2e, 0x86, 0x25, 0x02, 0xbb, 0xcb, 0x38, 0x0b, 0xdd, 0x0c, - 0xdf, 0xd3, 0x6a, 0xe2, 0x45, 0x31, 0x27, 0x0c, 0x8c, 0x15, 0x88, 0xfb, 0x50, 0xd2, 0xca, 0xe4, - 0x45, 0x71, 0x5b, 0x8c, 0x9a, 0x8e, 0x0f, 0xe5, 0x65, 0x98, 0x44, 0xdc, 0x23, 0x21, 0x83, 0x35, - 0xcd, 0x16, 0x77, 0xa2, 0x58, 0x3c, 0xef, 0x7d, 0xbb, 0x07, 0x86, 0x26, 0x2e, 0xf0, 0xc0, 0x3e, - 0x63, 0xb8, 0x05, 0x25, 0x2d, 0xdb, 0x02, 0x76, 0x08, 0xed, 0x40, 0x59, 0x0b, 0x5b, 0x80, 0x22, - 0x21, 0x9e, 0xeb, 0x63, 0x58, 0x33, 0xf7, 0xd1, 0xee, 0x2a, 0x23, 0xe4, 0x27, 0x3e, 0xac, 0xaf, - 0x98, 0x15, 0x1c, 0x27, 0x14, 0x65, 0x58, 0x36, 0x2b, 0x9e, 0xb0, 0x21, 0x87, 0xcd, 0x15, 0xf1, - 0x2c, 0x81, 0x47, 0xb0, 0x65, 0xbe, 0x8d, 0xee, 0x6b, 0xb8, 0x8b, 0x9b, 0x98, 0xc6, 0xb6, 0xe3, - 0xe0, 0x88, 0x43, 0x65, 0x85, 0x79, 0xea, 0x85, 0x2e, 0x39, 0x8d, 0x1d, 0xdf, 0x0e, 0x22, 0x40, - 0x2b, 0x81, 0x78, 0x61, 0x93, 0x40, 0x75, 0x25, 0x90, 0xe3, 0xae, 0xe7, 0x74, 0x6c, 0xa7, 0x03, - 0x35, 0x39, 0x11, 0x3d, 0x47, 0xf7, 0xd9, 0xe2, 0xc8, 0xca, 0xaf, 0xf3, 0x5b, 0x1d, 0xea, 0x1f, - 0xa2, 0xcd, 0xc5, 0xec, 0x50, 0x7a, 0xf5, 0xec, 0xb0, 0x90, 0xb3, 0xee, 0xa3, 0xbd, 0x17, 0x4d, - 0x4f, 0x46, 0xcf, 0x85, 0x4f, 0xad, 0x3f, 0x90, 0x4f, 0x1f, 0xa3, 0xbd, 0xd6, 0x4d, 0x3e, 0xdd, - 0x46, 0xd7, 0xbf, 0x18, 0x68, 0xdb, 0x49, 0xc7, 0xe3, 0xe4, 0x7c, 0x7e, 0x2b, 0xf7, 0x97, 0xe6, - 0x9c, 0x57, 0xdf, 0x8f, 0xc5, 0x9c, 0xf3, 0x1e, 0xda, 0x99, 0x0f, 0xaf, 0x92, 0xf4, 0xc9, 0x3c, - 0x9e, 0x25, 0xe7, 0xe9, 0x78, 0x90, 0xcd, 0x09, 0xc6, 0x4f, 0x4a, 0xef, 0x7f, 0x48, 0xb7, 0x73, - 0x16, 0xcb, 0x38, 0xd6, 0x2f, 0x51, 0x4d, 0x39, 0xf8, 0x7b, 0xba, 0x48, 0xf5, 0x21, 0xe1, 0x04, - 0xd5, 0x7d, 0x39, 0xb9, 0xdd, 0x2a, 0xfc, 0x7d, 0xb4, 0xb9, 0x98, 0x04, 0x4b, 0x72, 0x3e, 0x5f, - 0x90, 0x56, 0x1d, 
0x55, 0x17, 0x7a, 0x45, 0xbb, 0x0c, 0x51, 0xdd, 0x3e, 0x3f, 0x4f, 0x26, 0xb7, - 0xcb, 0xf2, 0x0d, 0x09, 0x2b, 0xbd, 0x34, 0x61, 0xd7, 0x06, 0xaa, 0x2e, 0x6c, 0x89, 0x84, 0x1d, - 0xa1, 0xbd, 0x71, 0xf2, 0x2c, 0x7e, 0xd1, 0x5a, 0xf6, 0x66, 0xb8, 0x3b, 0x4e, 0x9e, 0xb1, 0x1b, - 0x06, 0xb9, 0xbc, 0xac, 0xaf, 0x39, 0xc8, 0x65, 0xd2, 0x39, 0x64, 0xfd, 0x97, 0x81, 0x76, 0xd8, - 0xe3, 0x27, 0x73, 0x37, 0x7d, 0x76, 0xbb, 0xbc, 0x7e, 0x80, 0xca, 0x8f, 0xd3, 0x67, 0xf9, 0x6d, - 0xfb, 0x4d, 0xbd, 0x8b, 0x97, 0xb5, 0x3e, 0x68, 0xa7, 0xcf, 0xa8, 0x10, 0x35, 0xbf, 0x85, 0xaa, - 0xb3, 0x64, 0x3c, 0x88, 0xd3, 0x8b, 0x8b, 0x59, 0x32, 0x97, 0xd7, 0x6c, 0x99, 0x22, 0x01, 0x11, - 0x89, 0x58, 0x0e, 0x2a, 0xb7, 0xd3, 0x67, 0xfa, 0x45, 0xd6, 0xee, 0xf2, 0x98, 0xba, 0xcb, 0xf7, - 0xa8, 0xc0, 0x4e, 0xc5, 0x85, 0xa7, 0xdd, 0x1b, 0x99, 0xdc, 0x29, 0x85, 0xb2, 0xb5, 0x83, 0xea, - 0x85, 0x07, 0xa2, 0xae, 0xbf, 0x42, 0x35, 0x67, 0x94, 0xce, 0x6e, 0x35, 0xed, 0x98, 0xef, 0x2c, - 0xfb, 0x2c, 0xea, 0x51, 0x96, 0x25, 0xd5, 0xfd, 0xae, 0x21, 0x94, 0x5b, 0x10, 0xf6, 0xfe, 0xcf, - 0x40, 0x55, 0x96, 0xdc, 0x72, 0xa8, 0xbd, 0x87, 0xd6, 0x06, 0xfd, 0x79, 0x5f, 0xa6, 0xb5, 0xd6, - 0x28, 0x6d, 0x19, 0x54, 0xd2, 0xe2, 0x9d, 0x38, 0x9b, 0x4f, 0x93, 0xfe, 0xd5, 0x72, 0xf6, 0x6a, - 0x19, 0x98, 0xf9, 0x61, 0xde, 0x47, 0xeb, 0x17, 0xa3, 0xfe, 0xe5, 0x4c, 0x0e, 0xe4, 0xf2, 0xc9, - 0x93, 0xd1, 0x62, 0x3e, 0x93, 0x51, 0xcc, 0x53, 0xf9, 0x1a, 0x7a, 0xc5, 0x7c, 0x26, 0xc4, 0x78, - 0x7a, 0x53, 0x37, 0x6f, 0xbc, 0xb4, 0x9b, 0x0f, 0x51, 0x25, 0x8b, 0x57, 0xb4, 0xf2, 0xdb, 0xa8, - 0x22, 0x1c, 0x8e, 0x67, 0xc9, 0x78, 0x9e, 0xfd, 0x30, 0x42, 0xb7, 0x04, 0xc0, 0x92, 0xf1, 0xdc, - 0xfa, 0x4f, 0x03, 0x6d, 0xd3, 0xe4, 0x3c, 0x19, 0x3e, 0xbd, 0x5d, 0x35, 0x94, 0xf2, 0xe1, 0x17, - 0x49, 0xbe, 0x9b, 0x33, 0xe5, 0xc3, 0x2f, 0x92, 0x22, 0xfa, 0xf2, 0x4a, 0xf4, 0x37, 0x04, 0xb3, - 0xfe, 0xd2, 0x60, 0x2c, 0xb4, 0xde, 0x94, 0xab, 0xaa, 0x68, 0x33, 0x60, 0x2d, 0x31, 0xa8, 0x80, - 0x61, 0xd6, 0xd0, 0x96, 0x20, 0x22, 0x8c, 0x3b, 0x50, 0xb2, 0xfe, 0xd5, 0x40, 0x35, 0x15, 0x86, - 0x08, 0xfa, 0x85, 0xea, 0xc8, 0x3e, 0x59, 0xa9, 0xce, 0xa2, 0xb4, 0xc2, 0x3d, 0xbd, 0xb4, 0x3f, - 0x45, 0xf5, 0x69, 0xa6, 0x6c, 0x10, 0x5f, 0x4c, 0xd3, 0xab, 0xaf, 0x78, 0x4e, 0xd5, 0x16, 0xc2, - 0xcd, 0x69, 0x7a, 0x25, 0xf6, 0xd4, 0xa7, 0x4f, 0x2e, 0x2e, 0x92, 0x69, 0x96, 0x13, 0xf9, 0xd6, - 0xa5, 0x28, 0x83, 0x44, 0x56, 0xac, 0x2f, 0xcb, 0xa8, 0x12, 0xa5, 0xa3, 0x11, 0x7e, 0x9a, 0x8c, - 0xdf, 0x30, 0xdb, 0xdf, 0x43, 0x30, 0xcd, 0xaa, 0x94, 0x0c, 0xe2, 0x44, 0xac, 0x9f, 0xe5, 0x49, - 0xdf, 0x51, 0xb8, 0x54, 0x3b, 0x33, 0xbf, 0x8b, 0x76, 0xd2, 0x4f, 0xe5, 0x4b, 0x51, 0x49, 0x96, - 0xa5, 0xe4, 0xf6, 0x02, 0xce, 0x04, 0xad, 0xff, 0x28, 0xa1, 0xba, 0x72, 0x47, 0x24, 0x5a, 0x9b, - 0x35, 0x22, 0xe2, 0xfb, 0x21, 0x09, 0x31, 0xbc, 0xa5, 0x4d, 0x6e, 0x02, 0xf4, 0xc2, 0xa5, 0x13, - 0x40, 0x40, 0x11, 0xf5, 0x96, 0x46, 0x5e, 0x81, 0x91, 0x2e, 0x87, 0xb5, 0x15, 0x0c, 0x53, 0x0a, - 0x5b, 0x2b, 0x58, 0xbb, 0x1b, 0x01, 0xac, 0xda, 0x3d, 0xb1, 0x7d, 0x38, 0xd0, 0x26, 0x2c, 0x01, - 0x52, 0x37, 0x24, 0x34, 0x80, 0x47, 0xe6, 0xbd, 0x15, 0xb8, 0x61, 0x87, 0xf2, 0x1b, 0xd3, 0x32, - 0x7e, 0x4a, 0xa5, 0xf8, 0x75, 0xe9, 0x05, 0x3c, 0x93, 0x5f, 0x93, 0x1f, 0x9f, 0x0a, 0x3c, 0x60, - 0x2d, 0xb8, 0xde, 0x5a, 0x55, 0x8e, 0x03, 0x72, 0x82, 0xe1, 0xfa, 0x40, 0x7e, 0xc0, 0xd2, 0x8d, - 0x0a, 0xb7, 0xaf, 0x1f, 0x59, 0x8f, 0x51, 0x55, 0x24, 0x70, 0xb1, 0x7f, 0x7e, 0x80, 0x36, 0xf2, - 0x84, 0x1b, 0x72, 0x9e, 0xd8, 0xd5, 0xda, 0x46, 0x25, 0x9a, 0xe6, 0x32, 0x6f, 0x76, 0x4b, 0xfd, - 0x38, 0xeb, 0x9c, 0xac, 0xc5, 0x0b, 0x3b, 
0xa5, 0xaf, 0xb6, 0x63, 0xfd, 0x56, 0xec, 0xf3, 0x59, - 0x3a, 0x2a, 0xf6, 0xb9, 0x89, 0xd6, 0xc6, 0xfd, 0xab, 0x24, 0x6f, 0x36, 0xf9, 0xbf, 0x79, 0x82, - 0x20, 0xbf, 0xbb, 0x62, 0xf9, 0x31, 0x6a, 0x98, 0x64, 0xda, 0xdf, 0xf0, 0x4b, 0xd6, 0x4e, 0xae, - 0xa4, 0x99, 0xeb, 0xb0, 0xfe, 0xbb, 0x2c, 0xf6, 0x67, 0x6e, 0x5e, 0x38, 0x7f, 0xd3, 0xc7, 0xb8, - 0xf2, 0x8b, 0x1f, 0xe3, 0xde, 0x45, 0xdb, 0xe7, 0xfd, 0x71, 0x3a, 0x1e, 0x9e, 0xf7, 0x47, 0xb1, - 0xf4, 0x36, 0xfb, 0x1a, 0x57, 0x57, 0xa8, 0x7c, 0x96, 0xed, 0xa3, 0xcd, 0xfe, 0x68, 0xd8, 0x9f, - 0x25, 0xe2, 0xa0, 0x2d, 0x1f, 0x56, 0xe8, 0x82, 0xb4, 0xfe, 0xb7, 0xa4, 0xff, 0xa0, 0xfb, 0x35, - 0xb4, 0x97, 0x17, 0x10, 0xdb, 0x5e, 0x2c, 0x5e, 0x69, 0x4d, 0x3b, 0xf0, 0x7c, 0xf1, 0x80, 0x28, - 0xae, 0x2e, 0xc9, 0x92, 0xbf, 0x65, 0x96, 0xb4, 0x09, 0x5b, 0xa0, 0x0d, 0xdb, 0x6d, 0xfa, 0x76, - 0x8b, 0x2d, 0x3d, 0xe3, 0x04, 0xa3, 0x69, 0x7b, 0x7e, 0xf6, 0x0b, 0xf0, 0x12, 0x28, 0x55, 0xaf, - 0xaf, 0xc0, 0x01, 0x0e, 0x08, 0xed, 0x2d, 0xbd, 0x1d, 0x04, 0x9c, 0xff, 0x1c, 0xb4, 0xf9, 0x02, - 0x1c, 0xda, 0x01, 0x86, 0x2d, 0xed, 0x49, 0x21, 0x60, 0x86, 0xe9, 0x89, 0xe7, 0x2c, 0xbf, 0xe1, - 0x24, 0x4e, 0x9c, 0x8e, 0x7c, 0x68, 0xa2, 0x15, 0x3d, 0xd9, 0xef, 0xd8, 0x4b, 0x6f, 0x86, 0x3c, - 0xa2, 0xb6, 0x17, 0x72, 0x06, 0xb5, 0x15, 0x86, 0xfc, 0xdd, 0xc1, 0x21, 0x3e, 0xd4, 0x57, 0x18, - 0xea, 0x37, 0x9d, 0x6d, 0x6d, 0x0f, 0xcb, 0xb8, 0xec, 0x33, 0xd8, 0x69, 0x6c, 0x7d, 0xb2, 0x91, - 0x9d, 0x5a, 0xff, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x31, 0x03, 0x4e, 0xbd, 0xfd, 0x1f, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/socket/socket_service.proto b/vendor/google.golang.org/appengine/internal/socket/socket_service.proto deleted file mode 100644 index 2fcc7953d..000000000 --- a/vendor/google.golang.org/appengine/internal/socket/socket_service.proto +++ /dev/null @@ -1,460 +0,0 @@ -syntax = "proto2"; -option go_package = "socket"; - -package appengine; - -message RemoteSocketServiceError { - enum ErrorCode { - SYSTEM_ERROR = 1; - GAI_ERROR = 2; - FAILURE = 4; - PERMISSION_DENIED = 5; - INVALID_REQUEST = 6; - SOCKET_CLOSED = 7; - } - - enum SystemError { - option allow_alias = true; - - SYS_SUCCESS = 0; - SYS_EPERM = 1; - SYS_ENOENT = 2; - SYS_ESRCH = 3; - SYS_EINTR = 4; - SYS_EIO = 5; - SYS_ENXIO = 6; - SYS_E2BIG = 7; - SYS_ENOEXEC = 8; - SYS_EBADF = 9; - SYS_ECHILD = 10; - SYS_EAGAIN = 11; - SYS_EWOULDBLOCK = 11; - SYS_ENOMEM = 12; - SYS_EACCES = 13; - SYS_EFAULT = 14; - SYS_ENOTBLK = 15; - SYS_EBUSY = 16; - SYS_EEXIST = 17; - SYS_EXDEV = 18; - SYS_ENODEV = 19; - SYS_ENOTDIR = 20; - SYS_EISDIR = 21; - SYS_EINVAL = 22; - SYS_ENFILE = 23; - SYS_EMFILE = 24; - SYS_ENOTTY = 25; - SYS_ETXTBSY = 26; - SYS_EFBIG = 27; - SYS_ENOSPC = 28; - SYS_ESPIPE = 29; - SYS_EROFS = 30; - SYS_EMLINK = 31; - SYS_EPIPE = 32; - SYS_EDOM = 33; - SYS_ERANGE = 34; - SYS_EDEADLK = 35; - SYS_EDEADLOCK = 35; - SYS_ENAMETOOLONG = 36; - SYS_ENOLCK = 37; - SYS_ENOSYS = 38; - SYS_ENOTEMPTY = 39; - SYS_ELOOP = 40; - SYS_ENOMSG = 42; - SYS_EIDRM = 43; - SYS_ECHRNG = 44; - SYS_EL2NSYNC = 45; - SYS_EL3HLT = 46; - SYS_EL3RST = 47; - SYS_ELNRNG = 48; - SYS_EUNATCH = 49; - SYS_ENOCSI = 50; - SYS_EL2HLT = 51; - SYS_EBADE = 52; - SYS_EBADR = 53; - SYS_EXFULL = 54; - SYS_ENOANO = 55; - SYS_EBADRQC = 56; - SYS_EBADSLT = 57; - SYS_EBFONT = 59; - SYS_ENOSTR = 60; - SYS_ENODATA = 61; - SYS_ETIME = 62; - SYS_ENOSR = 63; - SYS_ENONET = 64; - SYS_ENOPKG = 65; - SYS_EREMOTE = 66; - SYS_ENOLINK = 67; - SYS_EADV = 68; - SYS_ESRMNT = 69; - SYS_ECOMM = 70; - SYS_EPROTO = 71; - SYS_EMULTIHOP = 72; - 
SYS_EDOTDOT = 73; - SYS_EBADMSG = 74; - SYS_EOVERFLOW = 75; - SYS_ENOTUNIQ = 76; - SYS_EBADFD = 77; - SYS_EREMCHG = 78; - SYS_ELIBACC = 79; - SYS_ELIBBAD = 80; - SYS_ELIBSCN = 81; - SYS_ELIBMAX = 82; - SYS_ELIBEXEC = 83; - SYS_EILSEQ = 84; - SYS_ERESTART = 85; - SYS_ESTRPIPE = 86; - SYS_EUSERS = 87; - SYS_ENOTSOCK = 88; - SYS_EDESTADDRREQ = 89; - SYS_EMSGSIZE = 90; - SYS_EPROTOTYPE = 91; - SYS_ENOPROTOOPT = 92; - SYS_EPROTONOSUPPORT = 93; - SYS_ESOCKTNOSUPPORT = 94; - SYS_EOPNOTSUPP = 95; - SYS_ENOTSUP = 95; - SYS_EPFNOSUPPORT = 96; - SYS_EAFNOSUPPORT = 97; - SYS_EADDRINUSE = 98; - SYS_EADDRNOTAVAIL = 99; - SYS_ENETDOWN = 100; - SYS_ENETUNREACH = 101; - SYS_ENETRESET = 102; - SYS_ECONNABORTED = 103; - SYS_ECONNRESET = 104; - SYS_ENOBUFS = 105; - SYS_EISCONN = 106; - SYS_ENOTCONN = 107; - SYS_ESHUTDOWN = 108; - SYS_ETOOMANYREFS = 109; - SYS_ETIMEDOUT = 110; - SYS_ECONNREFUSED = 111; - SYS_EHOSTDOWN = 112; - SYS_EHOSTUNREACH = 113; - SYS_EALREADY = 114; - SYS_EINPROGRESS = 115; - SYS_ESTALE = 116; - SYS_EUCLEAN = 117; - SYS_ENOTNAM = 118; - SYS_ENAVAIL = 119; - SYS_EISNAM = 120; - SYS_EREMOTEIO = 121; - SYS_EDQUOT = 122; - SYS_ENOMEDIUM = 123; - SYS_EMEDIUMTYPE = 124; - SYS_ECANCELED = 125; - SYS_ENOKEY = 126; - SYS_EKEYEXPIRED = 127; - SYS_EKEYREVOKED = 128; - SYS_EKEYREJECTED = 129; - SYS_EOWNERDEAD = 130; - SYS_ENOTRECOVERABLE = 131; - SYS_ERFKILL = 132; - } - - optional int32 system_error = 1 [default=0]; - optional string error_detail = 2; -} - -message AddressPort { - required int32 port = 1; - optional bytes packed_address = 2; - - optional string hostname_hint = 3; -} - - - -message CreateSocketRequest { - enum SocketFamily { - IPv4 = 1; - IPv6 = 2; - } - - enum SocketProtocol { - TCP = 1; - UDP = 2; - } - - required SocketFamily family = 1; - required SocketProtocol protocol = 2; - - repeated SocketOption socket_options = 3; - - optional AddressPort proxy_external_ip = 4; - - optional int32 listen_backlog = 5 [default=0]; - - optional AddressPort remote_ip = 6; - - optional string app_id = 9; - - optional int64 project_id = 10; -} - -message CreateSocketReply { - optional string socket_descriptor = 1; - - optional AddressPort server_address = 3; - - optional AddressPort proxy_external_ip = 4; - - extensions 1000 to max; -} - - - -message BindRequest { - required string socket_descriptor = 1; - required AddressPort proxy_external_ip = 2; -} - -message BindReply { - optional AddressPort proxy_external_ip = 1; -} - - - -message GetSocketNameRequest { - required string socket_descriptor = 1; -} - -message GetSocketNameReply { - optional AddressPort proxy_external_ip = 2; -} - - - -message GetPeerNameRequest { - required string socket_descriptor = 1; -} - -message GetPeerNameReply { - optional AddressPort peer_ip = 2; -} - - -message SocketOption { - - enum SocketOptionLevel { - SOCKET_SOL_IP = 0; - SOCKET_SOL_SOCKET = 1; - SOCKET_SOL_TCP = 6; - SOCKET_SOL_UDP = 17; - } - - enum SocketOptionName { - option allow_alias = true; - - SOCKET_SO_DEBUG = 1; - SOCKET_SO_REUSEADDR = 2; - SOCKET_SO_TYPE = 3; - SOCKET_SO_ERROR = 4; - SOCKET_SO_DONTROUTE = 5; - SOCKET_SO_BROADCAST = 6; - SOCKET_SO_SNDBUF = 7; - SOCKET_SO_RCVBUF = 8; - SOCKET_SO_KEEPALIVE = 9; - SOCKET_SO_OOBINLINE = 10; - SOCKET_SO_LINGER = 13; - SOCKET_SO_RCVTIMEO = 20; - SOCKET_SO_SNDTIMEO = 21; - - SOCKET_IP_TOS = 1; - SOCKET_IP_TTL = 2; - SOCKET_IP_HDRINCL = 3; - SOCKET_IP_OPTIONS = 4; - - SOCKET_TCP_NODELAY = 1; - SOCKET_TCP_MAXSEG = 2; - SOCKET_TCP_CORK = 3; - SOCKET_TCP_KEEPIDLE = 4; - SOCKET_TCP_KEEPINTVL = 5; - 
SOCKET_TCP_KEEPCNT = 6; - SOCKET_TCP_SYNCNT = 7; - SOCKET_TCP_LINGER2 = 8; - SOCKET_TCP_DEFER_ACCEPT = 9; - SOCKET_TCP_WINDOW_CLAMP = 10; - SOCKET_TCP_INFO = 11; - SOCKET_TCP_QUICKACK = 12; - } - - required SocketOptionLevel level = 1; - required SocketOptionName option = 2; - required bytes value = 3; -} - - -message SetSocketOptionsRequest { - required string socket_descriptor = 1; - repeated SocketOption options = 2; -} - -message SetSocketOptionsReply { -} - -message GetSocketOptionsRequest { - required string socket_descriptor = 1; - repeated SocketOption options = 2; -} - -message GetSocketOptionsReply { - repeated SocketOption options = 2; -} - - -message ConnectRequest { - required string socket_descriptor = 1; - required AddressPort remote_ip = 2; - optional double timeout_seconds = 3 [default=-1]; -} - -message ConnectReply { - optional AddressPort proxy_external_ip = 1; - - extensions 1000 to max; -} - - -message ListenRequest { - required string socket_descriptor = 1; - required int32 backlog = 2; -} - -message ListenReply { -} - - -message AcceptRequest { - required string socket_descriptor = 1; - optional double timeout_seconds = 2 [default=-1]; -} - -message AcceptReply { - optional bytes new_socket_descriptor = 2; - optional AddressPort remote_address = 3; -} - - - -message ShutDownRequest { - enum How { - SOCKET_SHUT_RD = 1; - SOCKET_SHUT_WR = 2; - SOCKET_SHUT_RDWR = 3; - } - required string socket_descriptor = 1; - required How how = 2; - required int64 send_offset = 3; -} - -message ShutDownReply { -} - - - -message CloseRequest { - required string socket_descriptor = 1; - optional int64 send_offset = 2 [default=-1]; -} - -message CloseReply { -} - - - -message SendRequest { - required string socket_descriptor = 1; - required bytes data = 2 [ctype=CORD]; - required int64 stream_offset = 3; - optional int32 flags = 4 [default=0]; - optional AddressPort send_to = 5; - optional double timeout_seconds = 6 [default=-1]; -} - -message SendReply { - optional int32 data_sent = 1; -} - - -message ReceiveRequest { - enum Flags { - MSG_OOB = 1; - MSG_PEEK = 2; - } - required string socket_descriptor = 1; - required int32 data_size = 2; - optional int32 flags = 3 [default=0]; - optional double timeout_seconds = 5 [default=-1]; -} - -message ReceiveReply { - optional int64 stream_offset = 2; - optional bytes data = 3 [ctype=CORD]; - optional AddressPort received_from = 4; - optional int32 buffer_size = 5; -} - - - -message PollEvent { - - enum PollEventFlag { - SOCKET_POLLNONE = 0; - SOCKET_POLLIN = 1; - SOCKET_POLLPRI = 2; - SOCKET_POLLOUT = 4; - SOCKET_POLLERR = 8; - SOCKET_POLLHUP = 16; - SOCKET_POLLNVAL = 32; - SOCKET_POLLRDNORM = 64; - SOCKET_POLLRDBAND = 128; - SOCKET_POLLWRNORM = 256; - SOCKET_POLLWRBAND = 512; - SOCKET_POLLMSG = 1024; - SOCKET_POLLREMOVE = 4096; - SOCKET_POLLRDHUP = 8192; - }; - - required string socket_descriptor = 1; - required int32 requested_events = 2; - required int32 observed_events = 3; -} - -message PollRequest { - repeated PollEvent events = 1; - optional double timeout_seconds = 2 [default=-1]; -} - -message PollReply { - repeated PollEvent events = 2; -} - -message ResolveRequest { - required string name = 1; - repeated CreateSocketRequest.SocketFamily address_families = 2; -} - -message ResolveReply { - enum ErrorCode { - SOCKET_EAI_ADDRFAMILY = 1; - SOCKET_EAI_AGAIN = 2; - SOCKET_EAI_BADFLAGS = 3; - SOCKET_EAI_FAIL = 4; - SOCKET_EAI_FAMILY = 5; - SOCKET_EAI_MEMORY = 6; - SOCKET_EAI_NODATA = 7; - SOCKET_EAI_NONAME = 8; - SOCKET_EAI_SERVICE = 9; - 
SOCKET_EAI_SOCKTYPE = 10; - SOCKET_EAI_SYSTEM = 11; - SOCKET_EAI_BADHINTS = 12; - SOCKET_EAI_PROTOCOL = 13; - SOCKET_EAI_OVERFLOW = 14; - SOCKET_EAI_MAX = 15; - }; - - repeated bytes packed_address = 2; - optional string canonical_name = 3; - repeated string aliases = 4; -} diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go index 9006ae653..2ae8ab9fa 100644 --- a/vendor/google.golang.org/appengine/internal/transaction.go +++ b/vendor/google.golang.org/appengine/internal/transaction.go @@ -7,11 +7,11 @@ package internal // This file implements hooks for applying datastore transactions. import ( + "context" "errors" "reflect" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" pb "google.golang.org/appengine/internal/datastore" @@ -38,13 +38,13 @@ func applyTransaction(pb proto.Message, t *pb.Transaction) { var transactionKey = "used for *Transaction" -func transactionFromContext(ctx netcontext.Context) *transaction { +func transactionFromContext(ctx context.Context) *transaction { t, _ := ctx.Value(&transactionKey).(*transaction) return t } -func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { - return netcontext.WithValue(ctx, &transactionKey, t) +func withTransaction(ctx context.Context, t *transaction) context.Context { + return context.WithValue(ctx, &transactionKey, t) } type transaction struct { @@ -54,7 +54,7 @@ type transaction struct { var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") -func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { +func RunTransactionOnce(c context.Context, f func(context.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { if transactionFromContext(c) != nil { return nil, errors.New("nested transactions are not supported") } diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go index 21860ca08..6f169be48 100644 --- a/vendor/google.golang.org/appengine/namespace.go +++ b/vendor/google.golang.org/appengine/namespace.go @@ -5,11 +5,10 @@ package appengine import ( + "context" "fmt" "regexp" - "golang.org/x/net/context" - "google.golang.org/appengine/internal" ) diff --git a/vendor/google.golang.org/appengine/socket/doc.go b/vendor/google.golang.org/appengine/socket/doc.go deleted file mode 100644 index 3de46df82..000000000 --- a/vendor/google.golang.org/appengine/socket/doc.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2012 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package socket provides outbound network sockets. -// -// This package is only required in the classic App Engine environment. -// Applications running only in App Engine "flexible environment" should -// use the standard library's net package. -package socket diff --git a/vendor/google.golang.org/appengine/socket/socket_classic.go b/vendor/google.golang.org/appengine/socket/socket_classic.go deleted file mode 100644 index 0ad50e2d3..000000000 --- a/vendor/google.golang.org/appengine/socket/socket_classic.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2012 Google Inc. All rights reserved. 
-// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build appengine - -package socket - -import ( - "fmt" - "io" - "net" - "strconv" - "time" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - "google.golang.org/appengine/internal" - - pb "google.golang.org/appengine/internal/socket" -) - -// Dial connects to the address addr on the network protocol. -// The address format is host:port, where host may be a hostname or an IP address. -// Known protocols are "tcp" and "udp". -// The returned connection satisfies net.Conn, and is valid while ctx is valid; -// if the connection is to be used after ctx becomes invalid, invoke SetContext -// with the new context. -func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { - return DialTimeout(ctx, protocol, addr, 0) -} - -var ipFamilies = []pb.CreateSocketRequest_SocketFamily{ - pb.CreateSocketRequest_IPv4, - pb.CreateSocketRequest_IPv6, -} - -// DialTimeout is like Dial but takes a timeout. -// The timeout includes name resolution, if required. -func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { - dialCtx := ctx // Used for dialing and name resolution, but not stored in the *Conn. - if timeout > 0 { - var cancel context.CancelFunc - dialCtx, cancel = context.WithTimeout(ctx, timeout) - defer cancel() - } - - host, portStr, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - port, err := strconv.Atoi(portStr) - if err != nil { - return nil, fmt.Errorf("socket: bad port %q: %v", portStr, err) - } - - var prot pb.CreateSocketRequest_SocketProtocol - switch protocol { - case "tcp": - prot = pb.CreateSocketRequest_TCP - case "udp": - prot = pb.CreateSocketRequest_UDP - default: - return nil, fmt.Errorf("socket: unknown protocol %q", protocol) - } - - packedAddrs, resolved, err := resolve(dialCtx, ipFamilies, host) - if err != nil { - return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) - } - if len(packedAddrs) == 0 { - return nil, fmt.Errorf("no addresses for %q", host) - } - - packedAddr := packedAddrs[0] // use first address - fam := pb.CreateSocketRequest_IPv4 - if len(packedAddr) == net.IPv6len { - fam = pb.CreateSocketRequest_IPv6 - } - - req := &pb.CreateSocketRequest{ - Family: fam.Enum(), - Protocol: prot.Enum(), - RemoteIp: &pb.AddressPort{ - Port: proto.Int32(int32(port)), - PackedAddress: packedAddr, - }, - } - if resolved { - req.RemoteIp.HostnameHint = &host - } - res := &pb.CreateSocketReply{} - if err := internal.Call(dialCtx, "remote_socket", "CreateSocket", req, res); err != nil { - return nil, err - } - - return &Conn{ - ctx: ctx, - desc: res.GetSocketDescriptor(), - prot: prot, - local: res.ProxyExternalIp, - remote: req.RemoteIp, - }, nil -} - -// LookupIP returns the given host's IP addresses. -func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { - packedAddrs, _, err := resolve(ctx, ipFamilies, host) - if err != nil { - return nil, fmt.Errorf("socket: failed resolving %q: %v", host, err) - } - addrs = make([]net.IP, len(packedAddrs)) - for i, pa := range packedAddrs { - addrs[i] = net.IP(pa) - } - return addrs, nil -} - -func resolve(ctx context.Context, fams []pb.CreateSocketRequest_SocketFamily, host string) ([][]byte, bool, error) { - // Check if it's an IP address. 
- if ip := net.ParseIP(host); ip != nil { - if ip := ip.To4(); ip != nil { - return [][]byte{ip}, false, nil - } - return [][]byte{ip}, false, nil - } - - req := &pb.ResolveRequest{ - Name: &host, - AddressFamilies: fams, - } - res := &pb.ResolveReply{} - if err := internal.Call(ctx, "remote_socket", "Resolve", req, res); err != nil { - // XXX: need to map to pb.ResolveReply_ErrorCode? - return nil, false, err - } - return res.PackedAddress, true, nil -} - -// withDeadline is like context.WithDeadline, except it ignores the zero deadline. -func withDeadline(parent context.Context, deadline time.Time) (context.Context, context.CancelFunc) { - if deadline.IsZero() { - return parent, func() {} - } - return context.WithDeadline(parent, deadline) -} - -// Conn represents a socket connection. -// It implements net.Conn. -type Conn struct { - ctx context.Context - desc string - offset int64 - - prot pb.CreateSocketRequest_SocketProtocol - local, remote *pb.AddressPort - - readDeadline, writeDeadline time.Time // optional -} - -// SetContext sets the context that is used by this Conn. -// It is usually used only when using a Conn that was created in a different context, -// such as when a connection is created during a warmup request but used while -// servicing a user request. -func (cn *Conn) SetContext(ctx context.Context) { - cn.ctx = ctx -} - -func (cn *Conn) Read(b []byte) (n int, err error) { - const maxRead = 1 << 20 - if len(b) > maxRead { - b = b[:maxRead] - } - - req := &pb.ReceiveRequest{ - SocketDescriptor: &cn.desc, - DataSize: proto.Int32(int32(len(b))), - } - res := &pb.ReceiveReply{} - if !cn.readDeadline.IsZero() { - req.TimeoutSeconds = proto.Float64(cn.readDeadline.Sub(time.Now()).Seconds()) - } - ctx, cancel := withDeadline(cn.ctx, cn.readDeadline) - defer cancel() - if err := internal.Call(ctx, "remote_socket", "Receive", req, res); err != nil { - return 0, err - } - if len(res.Data) == 0 { - return 0, io.EOF - } - if len(res.Data) > len(b) { - return 0, fmt.Errorf("socket: internal error: read too much data: %d > %d", len(res.Data), len(b)) - } - return copy(b, res.Data), nil -} - -func (cn *Conn) Write(b []byte) (n int, err error) { - const lim = 1 << 20 // max per chunk - - for n < len(b) { - chunk := b[n:] - if len(chunk) > lim { - chunk = chunk[:lim] - } - - req := &pb.SendRequest{ - SocketDescriptor: &cn.desc, - Data: chunk, - StreamOffset: &cn.offset, - } - res := &pb.SendReply{} - if !cn.writeDeadline.IsZero() { - req.TimeoutSeconds = proto.Float64(cn.writeDeadline.Sub(time.Now()).Seconds()) - } - ctx, cancel := withDeadline(cn.ctx, cn.writeDeadline) - defer cancel() - if err = internal.Call(ctx, "remote_socket", "Send", req, res); err != nil { - // assume zero bytes were sent in this RPC - break - } - n += int(res.GetDataSent()) - cn.offset += int64(res.GetDataSent()) - } - - return -} - -func (cn *Conn) Close() error { - req := &pb.CloseRequest{ - SocketDescriptor: &cn.desc, - } - res := &pb.CloseReply{} - if err := internal.Call(cn.ctx, "remote_socket", "Close", req, res); err != nil { - return err - } - cn.desc = "CLOSED" - return nil -} - -func addr(prot pb.CreateSocketRequest_SocketProtocol, ap *pb.AddressPort) net.Addr { - if ap == nil { - return nil - } - switch prot { - case pb.CreateSocketRequest_TCP: - return &net.TCPAddr{ - IP: net.IP(ap.PackedAddress), - Port: int(*ap.Port), - } - case pb.CreateSocketRequest_UDP: - return &net.UDPAddr{ - IP: net.IP(ap.PackedAddress), - Port: int(*ap.Port), - } - } - panic("unknown protocol " + prot.String()) -} - -func 
(cn *Conn) LocalAddr() net.Addr { return addr(cn.prot, cn.local) } -func (cn *Conn) RemoteAddr() net.Addr { return addr(cn.prot, cn.remote) } - -func (cn *Conn) SetDeadline(t time.Time) error { - cn.readDeadline = t - cn.writeDeadline = t - return nil -} - -func (cn *Conn) SetReadDeadline(t time.Time) error { - cn.readDeadline = t - return nil -} - -func (cn *Conn) SetWriteDeadline(t time.Time) error { - cn.writeDeadline = t - return nil -} - -// KeepAlive signals that the connection is still in use. -// It may be called to prevent the socket being closed due to inactivity. -func (cn *Conn) KeepAlive() error { - req := &pb.GetSocketNameRequest{ - SocketDescriptor: &cn.desc, - } - res := &pb.GetSocketNameReply{} - return internal.Call(cn.ctx, "remote_socket", "GetSocketName", req, res) -} - -func init() { - internal.RegisterErrorCodeMap("remote_socket", pb.RemoteSocketServiceError_ErrorCode_name) -} diff --git a/vendor/google.golang.org/appengine/socket/socket_vm.go b/vendor/google.golang.org/appengine/socket/socket_vm.go deleted file mode 100644 index c804169a1..000000000 --- a/vendor/google.golang.org/appengine/socket/socket_vm.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package socket - -import ( - "net" - "time" - - "golang.org/x/net/context" -) - -// Dial connects to the address addr on the network protocol. -// The address format is host:port, where host may be a hostname or an IP address. -// Known protocols are "tcp" and "udp". -// The returned connection satisfies net.Conn, and is valid while ctx is valid; -// if the connection is to be used after ctx becomes invalid, invoke SetContext -// with the new context. -func Dial(ctx context.Context, protocol, addr string) (*Conn, error) { - conn, err := net.Dial(protocol, addr) - if err != nil { - return nil, err - } - return &Conn{conn}, nil -} - -// DialTimeout is like Dial but takes a timeout. -// The timeout includes name resolution, if required. -func DialTimeout(ctx context.Context, protocol, addr string, timeout time.Duration) (*Conn, error) { - conn, err := net.DialTimeout(protocol, addr, timeout) - if err != nil { - return nil, err - } - return &Conn{conn}, nil -} - -// LookupIP returns the given host's IP addresses. -func LookupIP(ctx context.Context, host string) (addrs []net.IP, err error) { - return net.LookupIP(host) -} - -// Conn represents a socket connection. -// It implements net.Conn. -type Conn struct { - net.Conn -} - -// SetContext sets the context that is used by this Conn. -// It is usually used only when using a Conn that was created in a different context, -// such as when a connection is created during a warmup request but used while -// servicing a user request. -func (cn *Conn) SetContext(ctx context.Context) { - // This function is not required in App Engine "flexible environment". -} - -// KeepAlive signals that the connection is still in use. -// It may be called to prevent the socket being closed due to inactivity. -func (cn *Conn) KeepAlive() error { - // This function is not required in App Engine "flexible environment". 
- return nil -} diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go index 05642a992..fcf3ad0a5 100644 --- a/vendor/google.golang.org/appengine/timeout.go +++ b/vendor/google.golang.org/appengine/timeout.go @@ -4,7 +4,7 @@ package appengine -import "golang.org/x/net/context" +import "context" // IsTimeoutError reports whether err is a timeout error. func IsTimeoutError(err error) bool { diff --git a/vendor/google.golang.org/appengine/travis_install.sh b/vendor/google.golang.org/appengine/travis_install.sh deleted file mode 100644 index 785b62f46..000000000 --- a/vendor/google.golang.org/appengine/travis_install.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -e - -if [[ $GO111MODULE == "on" ]]; then - go get . -else - go get -u -v $(go list -f '{{join .Imports "\n"}}{{"\n"}}{{join .TestImports "\n"}}' ./... | sort | uniq | grep -v appengine) -fi - -if [[ $GOAPP == "true" ]]; then - mkdir /tmp/sdk - curl -o /tmp/sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" - unzip -q /tmp/sdk.zip -d /tmp/sdk - # NOTE: Set the following env vars in the test script: - # export PATH="$PATH:/tmp/sdk/go_appengine" - # export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py -fi - diff --git a/vendor/google.golang.org/appengine/travis_test.sh b/vendor/google.golang.org/appengine/travis_test.sh deleted file mode 100644 index d4390f045..000000000 --- a/vendor/google.golang.org/appengine/travis_test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e - -go version -go test -v google.golang.org/appengine/... -go test -v -race google.golang.org/appengine/... -if [[ $GOAPP == "true" ]]; then - export PATH="$PATH:/tmp/sdk/go_appengine" - export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py - goapp version - goapp test -v google.golang.org/appengine/... -fi diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go index 6ffe1e6d9..6c0d72418 100644 --- a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go +++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go @@ -7,6 +7,7 @@ package urlfetch // import "google.golang.org/appengine/urlfetch" import ( + "context" "errors" "fmt" "io" @@ -18,7 +19,6 @@ import ( "time" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/urlfetch" @@ -44,11 +44,10 @@ type Transport struct { var _ http.RoundTripper = (*Transport)(nil) // Client returns an *http.Client using a default urlfetch Transport. This -// client will have the default deadline of 5 seconds, and will check the -// validity of SSL certificates. +// client will check the validity of SSL certificates. // -// Any deadline of the provided context will be used for requests through this client; -// if the client does not have a deadline then a 5 second default is used. +// Any deadline of the provided context will be used for requests through this client. +// If the client does not have a deadline, then an App Engine default of 60 second is used. 
func Client(ctx context.Context) *http.Client { return &http.Client{ Transport: &Transport{ diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go new file mode 100644 index 000000000..d02e6bbc8 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go @@ -0,0 +1,295 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.21.12 +// source: google/api/field_info.proto + +package annotations + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The standard format of a field value. The supported formats are all backed +// by either an RFC defined by the IETF or a Google-defined AIP. +type FieldInfo_Format int32 + +const ( + // Default, unspecified value. + FieldInfo_FORMAT_UNSPECIFIED FieldInfo_Format = 0 + // Universally Unique Identifier, version 4, value as defined by + // https://datatracker.ietf.org/doc/html/rfc4122. The value may be + // normalized to entirely lowercase letters. For example, the value + // `F47AC10B-58CC-0372-8567-0E02B2C3D479` would be normalized to + // `f47ac10b-58cc-0372-8567-0e02b2c3d479`. + FieldInfo_UUID4 FieldInfo_Format = 1 + // Internet Protocol v4 value as defined by [RFC + // 791](https://datatracker.ietf.org/doc/html/rfc791). The value may be + // condensed, with leading zeros in each octet stripped. For example, + // `001.022.233.040` would be condensed to `1.22.233.40`. + FieldInfo_IPV4 FieldInfo_Format = 2 + // Internet Protocol v6 value as defined by [RFC + // 2460](https://datatracker.ietf.org/doc/html/rfc2460). The value may be + // normalized to entirely lowercase letters, and zero-padded partial and + // empty octets. For example, the value `2001:DB8::` would be normalized to + // `2001:0db8:0:0`. + FieldInfo_IPV6 FieldInfo_Format = 3 + // An IP address in either v4 or v6 format as described by the individual + // values defined herein. See the comments on the IPV4 and IPV6 types for + // allowed normalizations of each. + FieldInfo_IPV4_OR_IPV6 FieldInfo_Format = 4 +) + +// Enum value maps for FieldInfo_Format. 
+var ( + FieldInfo_Format_name = map[int32]string{ + 0: "FORMAT_UNSPECIFIED", + 1: "UUID4", + 2: "IPV4", + 3: "IPV6", + 4: "IPV4_OR_IPV6", + } + FieldInfo_Format_value = map[string]int32{ + "FORMAT_UNSPECIFIED": 0, + "UUID4": 1, + "IPV4": 2, + "IPV6": 3, + "IPV4_OR_IPV6": 4, + } +) + +func (x FieldInfo_Format) Enum() *FieldInfo_Format { + p := new(FieldInfo_Format) + *p = x + return p +} + +func (x FieldInfo_Format) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FieldInfo_Format) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_field_info_proto_enumTypes[0].Descriptor() +} + +func (FieldInfo_Format) Type() protoreflect.EnumType { + return &file_google_api_field_info_proto_enumTypes[0] +} + +func (x FieldInfo_Format) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use FieldInfo_Format.Descriptor instead. +func (FieldInfo_Format) EnumDescriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{0, 0} +} + +// Rich semantic information of an API field beyond basic typing. +type FieldInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The standard format of a field value. This does not explicitly configure + // any API consumer, just documents the API's format for the field it is + // applied to. + Format FieldInfo_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.api.FieldInfo_Format" json:"format,omitempty"` +} + +func (x *FieldInfo) Reset() { + *x = FieldInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_field_info_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FieldInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FieldInfo) ProtoMessage() {} + +func (x *FieldInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_api_field_info_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FieldInfo.ProtoReflect.Descriptor instead. +func (*FieldInfo) Descriptor() ([]byte, []int) { + return file_google_api_field_info_proto_rawDescGZIP(), []int{0} +} + +func (x *FieldInfo) GetFormat() FieldInfo_Format { + if x != nil { + return x.Format + } + return FieldInfo_FORMAT_UNSPECIFIED +} + +var file_google_api_field_info_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*FieldInfo)(nil), + Field: 291403980, + Name: "google.api.field_info", + Tag: "bytes,291403980,opt,name=field_info", + Filename: "google/api/field_info.proto", + }, +} + +// Extension fields to descriptorpb.FieldOptions. +var ( + // Rich semantic descriptor of an API field beyond the basic typing. 
+ // + // Examples: + // + // string request_id = 1 [(google.api.field_info).format = UUID4]; + // string old_ip_address = 2 [(google.api.field_info).format = IPV4]; + // string new_ip_address = 3 [(google.api.field_info).format = IPV6]; + // string actual_ip_address = 4 [ + // (google.api.field_info).format = IPV4_OR_IPV6 + // ]; + // + // optional google.api.FieldInfo field_info = 291403980; + E_FieldInfo = &file_google_api_field_info_proto_extTypes[0] +) + +var File_google_api_field_info_proto protoreflect.FileDescriptor + +var file_google_api_field_info_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x09, + 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, + 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, + 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, + 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, + 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f, 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, + 0x10, 0x04, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, + 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x6c, 0x0a, 0x0e, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, + 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_google_api_field_info_proto_rawDescOnce sync.Once + file_google_api_field_info_proto_rawDescData = file_google_api_field_info_proto_rawDesc +) + +func file_google_api_field_info_proto_rawDescGZIP() []byte { + file_google_api_field_info_proto_rawDescOnce.Do(func() { + 
file_google_api_field_info_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_info_proto_rawDescData) + }) + return file_google_api_field_info_proto_rawDescData +} + +var file_google_api_field_info_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_api_field_info_proto_goTypes = []interface{}{ + (FieldInfo_Format)(0), // 0: google.api.FieldInfo.Format + (*FieldInfo)(nil), // 1: google.api.FieldInfo + (*descriptorpb.FieldOptions)(nil), // 2: google.protobuf.FieldOptions +} +var file_google_api_field_info_proto_depIdxs = []int32{ + 0, // 0: google.api.FieldInfo.format:type_name -> google.api.FieldInfo.Format + 2, // 1: google.api.field_info:extendee -> google.protobuf.FieldOptions + 1, // 2: google.api.field_info:type_name -> google.api.FieldInfo + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 2, // [2:3] is the sub-list for extension type_name + 1, // [1:2] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_api_field_info_proto_init() } +func file_google_api_field_info_proto_init() { + if File_google_api_field_info_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_field_info_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FieldInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_field_info_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 1, + NumServices: 0, + }, + GoTypes: file_google_api_field_info_proto_goTypes, + DependencyIndexes: file_google_api_field_info_proto_depIdxs, + EnumInfos: file_google_api_field_info_proto_enumTypes, + MessageInfos: file_google_api_field_info_proto_msgTypes, + ExtensionInfos: file_google_api_field_info_proto_extTypes, + }.Build() + File_google_api_field_info_proto = out.File + file_google_api_field_info_proto_rawDesc = nil + file_google_api_field_info_proto_goTypes = nil + file_google_api_field_info_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 0e6ae69a5..ab0fbb79b 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -1,8 +1,8 @@ # gRPC-Go -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) +[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go) The [Go][] implementation of [gRPC][]: A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the @@ -14,21 +14,14 @@ RPC framework that puts mobile and HTTP/2 first. 
For more information see the ## Installation -With [Go module][] support (Go 1.11+), simply add the following import +Simply add the following import to your code, and then `go [build|run|test]` +will automatically fetch the necessary dependencies: + ```go import "google.golang.org/grpc" ``` -to your code, and then `go [build|run|test]` will automatically fetch the -necessary dependencies. - -Otherwise, to install the `grpc-go` package, run the following command: - -```console -$ go get -u google.golang.org/grpc -``` - > **Note:** If you are trying to access `grpc-go` from **China**, see the > [FAQ](#FAQ) below. @@ -56,15 +49,6 @@ To build Go code, there are several options: - Set up a VPN and access google.golang.org through that. -- Without Go module support: `git clone` the repo manually: - - ```sh - git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc - ``` - - You will need to do the same for all of grpc's dependencies in `golang.org`, - e.g. `golang.org/x/net`. - - With Go module support: it is possible to use the `replace` feature of `go mod` to create aliases for golang.org packages. In your project's directory: @@ -76,33 +60,13 @@ To build Go code, there are several options: ``` Again, this will need to be done for all transitive dependencies hosted on - golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652). + golang.org as well. For details, refer to [golang/go issue + #28652](https://github.com/golang/go/issues/28652). ### Compiling error, undefined: grpc.SupportPackageIsVersion -#### If you are using Go modules: - -Ensure your gRPC-Go version is `require`d at the appropriate version in -the same module containing the generated `.pb.go` files. For example, -`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: - -```go -module - -require ( - google.golang.org/grpc v1.27.0 -) -``` - -#### If you are *not* using Go modules: - -Update the `proto` package, gRPC package, and rebuild the `.proto` files: - -```sh -go get -u github.com/golang/protobuf/{proto,protoc-gen-go} -go get -u google.golang.org/grpc -protoc --go_out=plugins=grpc:. *.proto -``` +Please update to the latest version of gRPC-Go using +`go get google.golang.org/grpc`. ### How to turn on logging @@ -121,9 +85,11 @@ possible reasons, including: 1. mis-configured transport credentials, connection failed on handshaking 1. bytes disrupted, possibly by a proxy in between 1. server shutdown - 1. Keepalive parameters caused connection shutdown, for example if you have configured - your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). - If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + 1. Keepalive parameters caused connection shutdown, for example if you have + configured your server to terminate connections regularly to [trigger DNS + lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). + If this is the case, you may want to increase your + [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), to allow longer RPC calls to finish. 
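The FAQ entry above suggests raising MaxConnectionAgeGrace when keepalive-driven connection recycling cuts off long RPCs. A minimal server-side sketch of those knobs (the durations are illustrative, not recommendations):

```go
package example

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

// NewServer builds a gRPC server whose keepalive policy recycles connections
// periodically (for example to trigger DNS re-resolution) while giving
// in-flight RPCs a grace period to finish. Durations are illustrative only.
func NewServer() *grpc.Server {
	return grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
		MaxConnectionAge:      30 * time.Minute,
		MaxConnectionAgeGrace: 5 * time.Minute,
	}))
}
```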
It can be tricky to debug this because the error happens on the client side but diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 49712aca3..52d530d7a 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -34,26 +34,26 @@ import ( // key/value pairs. Keys must be hashable, and users should define their own // types for keys. Values should not be modified after they are added to an // Attributes or if they were received from one. If values implement 'Equal(o -// interface{}) bool', it will be called by (*Attributes).Equal to determine -// whether two values with the same key should be considered equal. +// any) bool', it will be called by (*Attributes).Equal to determine whether +// two values with the same key should be considered equal. type Attributes struct { - m map[interface{}]interface{} + m map[any]any } // New returns a new Attributes containing the key/value pair. -func New(key, value interface{}) *Attributes { - return &Attributes{m: map[interface{}]interface{}{key: value}} +func New(key, value any) *Attributes { + return &Attributes{m: map[any]any{key: value}} } // WithValue returns a new Attributes containing the previous keys and values // and the new key/value pair. If the same key appears multiple times, the // last value overwrites all previous values for that key. To remove an // existing key, use a nil value. value should not be modified later. -func (a *Attributes) WithValue(key, value interface{}) *Attributes { +func (a *Attributes) WithValue(key, value any) *Attributes { if a == nil { return New(key, value) } - n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} + n := &Attributes{m: make(map[any]any, len(a.m)+1)} for k, v := range a.m { n.m[k] = v } @@ -63,20 +63,19 @@ func (a *Attributes) WithValue(key, value interface{}) *Attributes { // Value returns the value associated with these attributes for key, or nil if // no value is associated with key. The returned value should not be modified. -func (a *Attributes) Value(key interface{}) interface{} { +func (a *Attributes) Value(key any) any { if a == nil { return nil } return a.m[key] } -// Equal returns whether a and o are equivalent. If 'Equal(o interface{}) -// bool' is implemented for a value in the attributes, it is called to -// determine if the value matches the one stored in the other attributes. If -// Equal is not implemented, standard equality is used to determine if the two -// values are equal. Note that some types (e.g. maps) aren't comparable by -// default, so they must be wrapped in a struct, or in an alias type, with Equal -// defined. +// Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is +// implemented for a value in the attributes, it is called to determine if the +// value matches the one stored in the other attributes. If Equal is not +// implemented, standard equality is used to determine if the two values are +// equal. Note that some types (e.g. maps) aren't comparable by default, so +// they must be wrapped in a struct, or in an alias type, with Equal defined. 
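As the updated doc comment above notes, values that are not comparable with == (maps, for example) must be wrapped in a type that defines Equal(o any) bool before being stored in an Attributes. A small sketch of such a wrapper, with a hypothetical labelsKey key type:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// labelsKey is a hypothetical attribute key type; users define their own.
type labelsKey struct{}

// labels wraps a map (not comparable by ==) and provides Equal so that
// (*Attributes).Equal can compare two values stored under the same key.
type labels struct{ m map[string]string }

func (l labels) Equal(o any) bool {
	ol, ok := o.(labels)
	if !ok || len(l.m) != len(ol.m) {
		return false
	}
	for k, v := range l.m {
		if ol.m[k] != v {
			return false
		}
	}
	return true
}

func main() {
	a := attributes.New(labelsKey{}, labels{m: map[string]string{"zone": "us-east1-a"}})
	b := attributes.New(labelsKey{}, labels{m: map[string]string{"zone": "us-east1-a"}})
	fmt.Println(a.Equal(b)) // true, via labels.Equal
}
```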
func (a *Attributes) Equal(o *Attributes) bool { if a == nil && o == nil { return true @@ -93,7 +92,7 @@ func (a *Attributes) Equal(o *Attributes) bool { // o missing element of a return false } - if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { + if eq, ok := v.(interface{ Equal(o any) bool }); ok { if !eq.Equal(ov) { return false } @@ -122,9 +121,9 @@ func (a *Attributes) String() string { return sb.String() } -func str(x interface{}) string { +func str(x any) (s string) { if v, ok := x.(fmt.Stringer); ok { - return v.String() + return fmt.Sprint(v) } else if v, ok := x.(string); ok { return v } diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 8f00523c0..d79560a2e 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -39,6 +40,8 @@ import ( var ( // m is a map from name to balancer builder. m = make(map[string]Builder) + + logger = grpclog.Component("balancer") ) // Register registers the balancer builder to the balancer map. b.Name @@ -51,6 +54,12 @@ var ( // an init() function), and is not thread-safe. If multiple Balancers are // registered with the same name, the one registered last will take effect. func Register(b Builder) { + if strings.ToLower(b.Name()) != b.Name() { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name()) + } m[strings.ToLower(b.Name())] = b } @@ -70,6 +79,12 @@ func init() { // Note that the compare is done in a case-insensitive fashion. // If no builder is register with the name, nil will be returned. func Get(name string) Builder { + if strings.ToLower(name) != name { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name) + } if b, ok := m[strings.ToLower(name)]; ok { return b } @@ -105,8 +120,8 @@ type SubConn interface { // // This will trigger a state transition for the SubConn. // - // Deprecated: This method is now part of the ClientConn interface and will - // eventually be removed from here. + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() @@ -115,6 +130,13 @@ type SubConn interface { // creates a new one and returns it. Returns a close function which must // be called when the Producer is no longer needed. GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) + // Shutdown shuts down the SubConn gracefully. Any started RPCs will be + // allowed to complete. No future calls should be made on the SubConn. 
+ // One final state update will be delivered to the StateListener (or + // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to + // indicate the shutdown operation. This may be delivered before + // in-progress RPCs are complete and the actual connection is closed. + Shutdown() } // NewSubConnOptions contains options to create new SubConn. @@ -129,6 +151,11 @@ type NewSubConnOptions struct { // HealthCheckEnabled indicates whether health check service should be // enabled on this SubConn HealthCheckEnabled bool + // StateListener is called when the state of the subconn changes. If nil, + // Balancer.UpdateSubConnState will be called instead. Will never be + // invoked until after Connect() is called on the SubConn created with + // these options. + StateListener func(SubConnState) } // State contains the balancer's state relevant to the gRPC ClientConn. @@ -150,16 +177,24 @@ type ClientConn interface { // NewSubConn is called by balancer to create a new SubConn. // It doesn't block and wait for the connections to be established. // Behaviors of the SubConn can be controlled by options. + // + // Deprecated: please be aware that in a future version, SubConns will only + // support one address per SubConn. NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) // RemoveSubConn removes the SubConn from ClientConn. // The SubConn will be shutdown. + // + // Deprecated: use SubConn.Shutdown instead. RemoveSubConn(SubConn) // UpdateAddresses updates the addresses used in the passed in SubConn. // gRPC checks if the currently connected address is still in the new list. // If so, the connection will be kept. Else, the connection will be // gracefully closed, and a new connection will be created. // - // This will trigger a state transition for the SubConn. + // This may trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. UpdateAddresses(SubConn, []resolver.Address) // UpdateState notifies gRPC that the balancer's internal state has @@ -250,7 +285,7 @@ type DoneInfo struct { // trailing metadata. // // The only supported type now is *orca_v3.LoadReport. - ServerLoad interface{} + ServerLoad any } var ( @@ -343,9 +378,13 @@ type Balancer interface { ResolverError(error) // UpdateSubConnState is called by gRPC when the state of a SubConn // changes. + // + // Deprecated: Use NewSubConnOptions.StateListener when creating the + // SubConn instead. UpdateSubConnState(SubConn, SubConnState) - // Close closes the balancer. The balancer is not required to call - // ClientConn.RemoveSubConn for its existing SubConns. + // Close closes the balancer. The balancer is not currently required to + // call SubConn.Shutdown for its existing SubConns; however, this will be + // required in a future release, so it is recommended. Close() } @@ -390,15 +429,14 @@ var ErrBadResolverState = errors.New("bad resolver state") type ProducerBuilder interface { // Build creates a Producer. The first parameter is always a // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the - // associated SubConn), but is declared as interface{} to avoid a - // dependency cycle. Should also return a close function that will be - // called when all references to the Producer have been given up. - Build(grpcClientConnInterface interface{}) (p Producer, close func()) + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. 
Should also return a close function that will be called when all + // references to the Producer have been given up. + Build(grpcClientConnInterface any) (p Producer, close func()) } // A Producer is a type shared among potentially many consumers. It is // associated with a SubConn, and an implementation will typically contain // other methods to provide additional functionality, e.g. configuration or // subscription registration. -type Producer interface { -} +type Producer any diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index 3929c26d3..a7f1eeec8 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -105,7 +105,12 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { addrsSet.Set(a, nil) if _, ok := b.subConns.Get(a); !ok { // a is a new address (not existing in b.subConns). - sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) + var sc balancer.SubConn + opts := balancer.NewSubConnOptions{ + HealthCheckEnabled: b.config.HealthCheck, + StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) }, + } + sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts) if err != nil { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue @@ -121,10 +126,10 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { sc := sci.(balancer.SubConn) // a was removed by resolver. if _, ok := addrsSet.Get(a); !ok { - b.cc.RemoveSubConn(sc) + sc.Shutdown() b.subConns.Delete(a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in UpdateSubConnState. + // The entry will be deleted in updateSubConnState. } } // If resolver state contains no addresses, return an error so ClientConn @@ -177,7 +182,12 @@ func (b *baseBalancer) regeneratePicker() { b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) } +// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn. func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { s := state.ConnectivityState if logger.V(2) { logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) @@ -204,8 +214,8 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su case connectivity.Idle: sc.Connect() case connectivity.Shutdown: - // When an address was removed by resolver, b called RemoveSubConn but - // kept the sc's state in scStates. Remove state for this sc here. + // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. delete(b.scStates, sc) case connectivity.TransientFailure: // Save error to be reported via picker. @@ -226,7 +236,7 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su } // Close is a nop because base balancer doesn't have internal state to clean up, -// and it doesn't need to call RemoveSubConn for the SubConns. +// and it doesn't need to call Shutdown for the SubConns. 
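The baseBalancer change above illustrates the new pattern: instead of routing per-SubConn state through Balancer.UpdateSubConnState, a StateListener closure is attached when the SubConn is created. A minimal sketch of that closure pattern for a custom balancer (createSubConn and onState are illustrative names, not part of the API):

```go
package example

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/resolver"
)

// createSubConn sketches the listener-based pattern: the StateListener closure
// captures the sc variable, so state updates arrive per SubConn instead of
// through Balancer.UpdateSubConnState.
func createSubConn(cc balancer.ClientConn, addr resolver.Address, onState func(balancer.SubConn, balancer.SubConnState)) (balancer.SubConn, error) {
	var sc balancer.SubConn
	opts := balancer.NewSubConnOptions{
		// sc is still nil here; per the SubConn docs, the listener is never
		// invoked before Connect() is called, so the assignment below has
		// already happened by the time the closure runs.
		StateListener: func(scs balancer.SubConnState) { onState(sc, scs) },
	}
	sc, err := cc.NewSubConn([]resolver.Address{addr}, opts)
	if err != nil {
		return nil, err
	}
	sc.Connect()
	return sc, nil
}
```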
func (b *baseBalancer) Close() { } diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index f070878bd..f35453028 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,7 +19,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/lb/v1/load_balancer.proto diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go index 6d698229a..86ba65be4 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -32,14 +32,18 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/resolver/dns" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" durationpb "github.com/golang/protobuf/ptypes/duration" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" @@ -132,7 +136,11 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal // This generates a manual resolver builder with a fixed scheme. This // scheme will be used to dial to remote LB, so we can send filtered // address updates to remote LB ClientConn using this manual resolver. - r := &lbManualResolver{scheme: "grpclb-internal", ccb: cc} + mr := manual.NewBuilderWithScheme("grpclb-internal") + // ResolveNow() on this manual resolver is forwarded to the parent + // ClientConn, so when grpclb client loses contact with the remote balancer, + // the parent ClientConn's resolver will re-resolve. + mr.ResolveNowCallback = cc.ResolveNow lb := &lbBalancer{ cc: newLBCacheClientConn(cc), @@ -142,23 +150,24 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal fallbackTimeout: b.fallbackTimeout, doneCh: make(chan struct{}), - manualResolver: r, + manualResolver: mr, subConns: make(map[resolver.Address]balancer.SubConn), scStates: make(map[balancer.SubConn]connectivity.State), - picker: &errPicker{err: balancer.ErrNoSubConnAvailable}, + picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), clientStats: newRPCStats(), backoff: backoff.DefaultExponential, // TODO: make backoff configurable. 
} + lb.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[grpclb %p] ", lb)) var err error if opt.CredsBundle != nil { lb.grpclbClientConnCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBalancer) if err != nil { - logger.Warningf("lbBalancer: client connection creds NewWithMode failed: %v", err) + lb.logger.Warningf("Failed to create credentials used for connecting to grpclb: %v", err) } lb.grpclbBackendCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBackendFromBalancer) if err != nil { - logger.Warningf("lbBalancer: backend creds NewWithMode failed: %v", err) + lb.logger.Warningf("Failed to create credentials used for connecting to backends returned by grpclb: %v", err) } } @@ -170,6 +179,7 @@ type lbBalancer struct { dialTarget string // user's dial target target string // same as dialTarget unless overridden in service config opt balancer.BuildOptions + logger *internalgrpclog.PrefixLogger usePickFirst bool @@ -188,7 +198,7 @@ type lbBalancer struct { // manualResolver is used in the remote LB ClientConn inside grpclb. When // resolved address updates are received by grpclb, filtered updates will be // send to remote LB ClientConn through this resolver. - manualResolver *lbManualResolver + manualResolver *manual.Resolver // The ClientConn to talk to the remote balancer. ccRemoteLB *remoteBalancerCCWrapper // backoff for calling remote balancer. @@ -213,7 +223,7 @@ type lbBalancer struct { backendAddrsWithoutMetadata []resolver.Address // Roundrobin functionalities. state connectivity.State - subConns map[resolver.Address]balancer.SubConn // Used to new/remove SubConn. + subConns map[resolver.Address]balancer.SubConn // Used to new/shutdown SubConn. scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns. picker balancer.Picker // Support fallback to resolved backend addresses if there's no response @@ -236,12 +246,12 @@ type lbBalancer struct { // Caller must hold lb.mu. func (lb *lbBalancer) regeneratePicker(resetDrop bool) { if lb.state == connectivity.TransientFailure { - lb.picker = &errPicker{err: fmt.Errorf("all SubConns are in TransientFailure, last connection error: %v", lb.connErr)} + lb.picker = base.NewErrPicker(fmt.Errorf("all SubConns are in TransientFailure, last connection error: %v", lb.connErr)) return } if lb.state == connectivity.Connecting { - lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable} + lb.picker = base.NewErrPicker(balancer.ErrNoSubConnAvailable) return } @@ -268,7 +278,7 @@ func (lb *lbBalancer) regeneratePicker(resetDrop bool) { // // This doesn't seem to be necessary after the connecting check above. // Kept for safety. - lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable} + lb.picker = base.NewErrPicker(balancer.ErrNoSubConnAvailable) return } if lb.inFallback { @@ -290,7 +300,7 @@ func (lb *lbBalancer) regeneratePicker(resetDrop bool) { // aggregateSubConnStats calculate the aggregated state of SubConns in // lb.SubConns. These SubConns are subconns in use (when switching between // fallback and grpclb). lb.scState contains states for all SubConns, including -// those in cache (SubConns are cached for 10 seconds after remove). +// those in cache (SubConns are cached for 10 seconds after shutdown). 
// // The aggregated state is: // - If at least one SubConn in Ready, the aggregated state is Ready; @@ -319,18 +329,24 @@ func (lb *lbBalancer) aggregateSubConnStates() connectivity.State { return connectivity.TransientFailure } +// UpdateSubConnState is unused; NewSubConn's options always specifies +// updateSubConnState as the listener. func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { + lb.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, scs) +} + +func (lb *lbBalancer) updateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { s := scs.ConnectivityState - if logger.V(2) { - logger.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s) + if lb.logger.V(2) { + lb.logger.Infof("SubConn state change: %p, %v", sc, s) } lb.mu.Lock() defer lb.mu.Unlock() oldS, ok := lb.scStates[sc] if !ok { - if logger.V(2) { - logger.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + if lb.logger.V(2) { + lb.logger.Infof("Received state change for an unknown SubConn: %p, %v", sc, s) } return } @@ -339,8 +355,8 @@ func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubCo case connectivity.Idle: sc.Connect() case connectivity.Shutdown: - // When an address was removed by resolver, b called RemoveSubConn but - // kept the sc's state in scStates. Remove state for this sc here. + // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. delete(lb.scStates, sc) case connectivity.TransientFailure: lb.connErr = scs.ConnectionError @@ -373,8 +389,13 @@ func (lb *lbBalancer) updateStateAndPicker(forceRegeneratePicker bool, resetDrop if forceRegeneratePicker || (lb.state != oldAggrState) { lb.regeneratePicker(resetDrop) } + var cc balancer.ClientConn = lb.cc + if lb.usePickFirst { + // Bypass the caching layer that would wrap the picker. + cc = lb.cc.ClientConn + } - lb.cc.UpdateState(balancer.State{ConnectivityState: lb.state, Picker: lb.picker}) + cc.UpdateState(balancer.State{ConnectivityState: lb.state, Picker: lb.picker}) } // fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use @@ -430,8 +451,8 @@ func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) { if lb.usePickFirst == newUsePickFirst { return } - if logger.V(2) { - logger.Infof("lbBalancer: switching mode, new usePickFirst: %+v", newUsePickFirst) + if lb.logger.V(2) { + lb.logger.Infof("Switching mode. Is pick_first used for backends? 
%v", newUsePickFirst) } lb.refreshSubConns(lb.backendAddrs, lb.inFallback, newUsePickFirst) } @@ -442,23 +463,15 @@ func (lb *lbBalancer) ResolverError(error) { } func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { - if logger.V(2) { - logger.Infof("lbBalancer: UpdateClientConnState: %+v", ccs) + if lb.logger.V(2) { + lb.logger.Infof("UpdateClientConnState: %s", pretty.ToJSON(ccs)) } gc, _ := ccs.BalancerConfig.(*grpclbServiceConfig) lb.handleServiceConfig(gc) - addrs := ccs.ResolverState.Addresses + backendAddrs := ccs.ResolverState.Addresses - var remoteBalancerAddrs, backendAddrs []resolver.Address - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - a.Type = resolver.Backend - remoteBalancerAddrs = append(remoteBalancerAddrs, a) - } else { - backendAddrs = append(backendAddrs, a) - } - } + var remoteBalancerAddrs []resolver.Address if sd := grpclbstate.Get(ccs.ResolverState); sd != nil { // Override any balancer addresses provided via // ccs.ResolverState.Addresses. @@ -479,7 +492,9 @@ func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error } else if lb.ccRemoteLB == nil { // First time receiving resolved addresses, create a cc to remote // balancers. - lb.newRemoteBalancerCCWrapper() + if err := lb.newRemoteBalancerCCWrapper(); err != nil { + return err + } // Start the fallback goroutine. go lb.fallbackToBackendsAfter(lb.fallbackTimeout) } diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go index 39bc5cc71..20c5f2ec3 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go @@ -98,15 +98,6 @@ func (s *rpcStats) knownReceived() { atomic.AddInt64(&s.numCallsFinished, 1) } -type errPicker struct { - // Pick always returns this err. - err error -} - -func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - return balancer.PickResult{}, p.err -} - // rrPicker does roundrobin on subConns. It's typically used when there's no // response from remote balancer, and grpclb falls back to the resolved // backends. diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go index e56006d71..c8fe1edd8 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go @@ -27,11 +27,8 @@ import ( "time" "github.com/golang/protobuf/proto" - timestamppb "github.com/golang/protobuf/ptypes/timestamp" - "github.com/google/go-cmp/cmp" "google.golang.org/grpc" "google.golang.org/grpc/balancer" - lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/backoff" @@ -39,13 +36,28 @@ import ( "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" + + timestamppb "github.com/golang/protobuf/ptypes/timestamp" + lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" ) +func serverListEqual(a, b []*lbpb.Server) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !proto.Equal(a[i], b[i]) { + return false + } + } + return true +} + // processServerList updates balancer's internal state, create/remove SubConns // and regenerates picker using the received serverList. 
func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { - if logger.V(2) { - logger.Infof("lbBalancer: processing server list: %+v", l) + if lb.logger.V(2) { + lb.logger.Infof("Processing server list: %#v", l) } lb.mu.Lock() defer lb.mu.Unlock() @@ -55,9 +67,9 @@ func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { lb.serverListReceived = true // If the new server list == old server list, do nothing. - if cmp.Equal(lb.fullServerList, l.Servers, cmp.Comparer(proto.Equal)) { - if logger.V(2) { - logger.Infof("lbBalancer: new serverlist same as the previous one, ignoring") + if serverListEqual(lb.fullServerList, l.Servers) { + if lb.logger.V(2) { + lb.logger.Infof("Ignoring new server list as it is the same as the previous one") } return } @@ -78,9 +90,8 @@ func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { ipStr = fmt.Sprintf("[%s]", ipStr) } addr := imetadata.Set(resolver.Address{Addr: fmt.Sprintf("%s:%d", ipStr, s.Port)}, md) - if logger.V(2) { - logger.Infof("lbBalancer: server list entry[%d]: ipStr:|%s|, port:|%d|, load balancer token:|%v|", - i, ipStr, s.Port, s.LoadBalanceToken) + if lb.logger.V(2) { + lb.logger.Infof("Server list entry:|%d|, ipStr:|%s|, port:|%d|, load balancer token:|%v|", i, ipStr, s.Port, s.LoadBalanceToken) } backendAddrs = append(backendAddrs, addr) } @@ -113,7 +124,6 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback } balancingPolicyChanged := lb.usePickFirst != pickFirst - oldUsePickFirst := lb.usePickFirst lb.usePickFirst = pickFirst if fallbackModeChanged || balancingPolicyChanged { @@ -123,13 +133,7 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback // For fallback mode switching with pickfirst, we want to recreate the // SubConn because the creds could be different. for a, sc := range lb.subConns { - if oldUsePickFirst { - // If old SubConn were created for pickfirst, bypass cache and - // remove directly. - lb.cc.cc.RemoveSubConn(sc) - } else { - lb.cc.RemoveSubConn(sc) - } + sc.Shutdown() delete(lb.subConns, a) } } @@ -144,18 +148,19 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback } if sc != nil { if len(backendAddrs) == 0 { - lb.cc.cc.RemoveSubConn(sc) + sc.Shutdown() delete(lb.subConns, scKey) return } - lb.cc.cc.UpdateAddresses(sc, backendAddrs) + lb.cc.ClientConn.UpdateAddresses(sc, backendAddrs) sc.Connect() return } + opts.StateListener = func(scs balancer.SubConnState) { lb.updateSubConnState(sc, scs) } // This bypasses the cc wrapper with SubConn cache. - sc, err := lb.cc.cc.NewSubConn(backendAddrs, opts) + sc, err := lb.cc.ClientConn.NewSubConn(backendAddrs, opts) if err != nil { - logger.Warningf("grpclb: failed to create new SubConn: %v", err) + lb.logger.Warningf("Failed to create new SubConn: %v", err) return } sc.Connect() @@ -176,9 +181,11 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback if _, ok := lb.subConns[addrWithoutAttrs]; !ok { // Use addrWithMD to create the SubConn. + var sc balancer.SubConn + opts.StateListener = func(scs balancer.SubConnState) { lb.updateSubConnState(sc, scs) } sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, opts) if err != nil { - logger.Warningf("grpclb: failed to create new SubConn: %v", err) + lb.logger.Warningf("Failed to create new SubConn: %v", err) continue } lb.subConns[addrWithoutAttrs] = sc // Use the addr without MD as key for the map. 
@@ -194,7 +201,7 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback for a, sc := range lb.subConns { // a was removed by resolver. if _, ok := addrsSet[a]; !ok { - lb.cc.RemoveSubConn(sc) + sc.Shutdown() delete(lb.subConns, a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. // The entry will be deleted in UpdateSubConnState. @@ -221,7 +228,7 @@ type remoteBalancerCCWrapper struct { wg sync.WaitGroup } -func (lb *lbBalancer) newRemoteBalancerCCWrapper() { +func (lb *lbBalancer) newRemoteBalancerCCWrapper() error { var dopts []grpc.DialOption if creds := lb.opt.DialCreds; creds != nil { dopts = append(dopts, grpc.WithTransportCredentials(creds)) @@ -252,9 +259,10 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { // // The grpclb server addresses will set field ServerName, and creds will // receive ServerName as authority. - cc, err := grpc.DialContext(context.Background(), lb.manualResolver.Scheme()+":///grpclb.subClientConn", dopts...) + target := lb.manualResolver.Scheme() + ":///grpclb.subClientConn" + cc, err := grpc.Dial(target, dopts...) if err != nil { - logger.Fatalf("failed to dial: %v", err) + return fmt.Errorf("grpc.Dial(%s): %v", target, err) } ccw := &remoteBalancerCCWrapper{ cc: cc, @@ -265,6 +273,7 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { lb.ccRemoteLB = ccw ccw.wg.Add(1) go ccw.watchRemoteBalancer() + return nil } // close closed the ClientConn to remote balancer, and waits until all @@ -412,14 +421,14 @@ func (ccw *remoteBalancerCCWrapper) watchRemoteBalancer() { default: if err != nil { if err == errServerTerminatedConnection { - logger.Info(err) + ccw.lb.logger.Infof("Call to remote balancer failed: %v", err) } else { - logger.Warning(err) + ccw.lb.logger.Warningf("Call to remote balancer failed: %v", err) } } } // Trigger a re-resolve when the stream errors. - ccw.lb.cc.cc.ResolveNow(resolver.ResolveNowOptions{}) + ccw.lb.cc.ClientConn.ResolveNow(resolver.ResolveNowOptions{}) ccw.lb.mu.Lock() ccw.lb.remoteBalancerConnected = false diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go index 373f04b98..c0f762c0c 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go @@ -27,75 +27,15 @@ import ( "google.golang.org/grpc/resolver" ) -// The parent ClientConn should re-resolve when grpclb loses connection to the -// remote balancer. When the ClientConn inside grpclb gets a TransientFailure, -// it calls lbManualResolver.ResolveNow(), which calls parent ClientConn's -// ResolveNow, and eventually results in re-resolve happening in parent -// ClientConn's resolver (DNS for example). -// -// parent -// ClientConn -// +-----------------------------------------------------------------+ -// | parent +---------------------------------+ | -// | DNS ClientConn | grpclb | | -// | resolver balancerWrapper | | | -// | + + | grpclb grpclb | | -// | | | | ManualResolver ClientConn | | -// | | | | + + | | -// | | | | | | Transient | | -// | | | | | | Failure | | -// | | | | | <--------- | | | -// | | | <--------------- | ResolveNow | | | -// | | <--------- | ResolveNow | | | | | -// | | ResolveNow | | | | | | -// | | | | | | | | -// | + + | + + | | -// | +---------------------------------+ | -// +-----------------------------------------------------------------+ - -// lbManualResolver is used by the ClientConn inside grpclb. 
It's a manual -// resolver with a special ResolveNow() function. -// -// When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn, -// so when grpclb client lose contact with remote balancers, the parent -// ClientConn's resolver will re-resolve. -type lbManualResolver struct { - scheme string - ccr resolver.ClientConn - - ccb balancer.ClientConn -} - -func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { - r.ccr = cc - return r, nil -} - -func (r *lbManualResolver) Scheme() string { - return r.scheme -} - -// ResolveNow calls resolveNow on the parent ClientConn. -func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOptions) { - r.ccb.ResolveNow(o) -} - -// Close is a noop for Resolver. -func (*lbManualResolver) Close() {} - -// UpdateState calls cc.UpdateState. -func (r *lbManualResolver) UpdateState(s resolver.State) { - r.ccr.UpdateState(s) -} - const subConnCacheTime = time.Second * 10 // lbCacheClientConn is a wrapper balancer.ClientConn with a SubConn cache. -// SubConns will be kept in cache for subConnCacheTime before being removed. +// SubConns will be kept in cache for subConnCacheTime before being shut down. // -// Its new and remove methods are updated to do cache first. +// Its NewSubconn and SubConn.Shutdown methods are updated to do cache first. type lbCacheClientConn struct { - cc balancer.ClientConn + balancer.ClientConn + timeout time.Duration mu sync.Mutex @@ -113,7 +53,7 @@ type subConnCacheEntry struct { func newLBCacheClientConn(cc balancer.ClientConn) *lbCacheClientConn { return &lbCacheClientConn{ - cc: cc, + ClientConn: cc, timeout: subConnCacheTime, subConnCache: make(map[resolver.Address]*subConnCacheEntry), subConnToAddr: make(map[balancer.SubConn]resolver.Address), @@ -137,16 +77,27 @@ func (ccc *lbCacheClientConn) NewSubConn(addrs []resolver.Address, opts balancer return entry.sc, nil } - scNew, err := ccc.cc.NewSubConn(addrs, opts) + scNew, err := ccc.ClientConn.NewSubConn(addrs, opts) if err != nil { return nil, err } + scNew = &lbCacheSubConn{SubConn: scNew, ccc: ccc} ccc.subConnToAddr[scNew] = addrWithoutAttrs return scNew, nil } func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) { + logger.Errorf("RemoveSubConn(%v) called unexpectedly", sc) +} + +type lbCacheSubConn struct { + balancer.SubConn + ccc *lbCacheClientConn +} + +func (sc *lbCacheSubConn) Shutdown() { + ccc := sc.ccc ccc.mu.Lock() defer ccc.mu.Unlock() addr, ok := ccc.subConnToAddr[sc] @@ -156,11 +107,11 @@ func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) { if entry, ok := ccc.subConnCache[addr]; ok { if entry.sc != sc { - // This could happen if NewSubConn was called multiple times for the - // same address, and those SubConns are all removed. We remove sc - // immediately here. + // This could happen if NewSubConn was called multiple times for + // the same address, and those SubConns are all shut down. We + // remove sc immediately here. 
delete(ccc.subConnToAddr, sc) - ccc.cc.RemoveSubConn(sc) + sc.SubConn.Shutdown() } return } @@ -176,7 +127,7 @@ func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) { if entry.abortDeleting { return } - ccc.cc.RemoveSubConn(sc) + sc.SubConn.Shutdown() delete(ccc.subConnToAddr, sc) delete(ccc.subConnCache, addr) }) @@ -195,14 +146,28 @@ func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) { } func (ccc *lbCacheClientConn) UpdateState(s balancer.State) { - ccc.cc.UpdateState(s) + s.Picker = &lbCachePicker{Picker: s.Picker} + ccc.ClientConn.UpdateState(s) } func (ccc *lbCacheClientConn) close() { ccc.mu.Lock() - // Only cancel all existing timers. There's no need to remove SubConns. + defer ccc.mu.Unlock() + // Only cancel all existing timers. There's no need to shut down SubConns. for _, entry := range ccc.subConnCache { entry.cancel() } - ccc.mu.Unlock() +} + +type lbCachePicker struct { + balancer.Picker +} + +func (cp *lbCachePicker) Pick(i balancer.PickInfo) (balancer.PickResult, error) { + res, err := cp.Picker.Pick(i) + if err != nil { + return res, err + } + res.SubConn = res.SubConn.(*lbCacheSubConn).SubConn + return res, nil } diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 04b9ad411..a4411c22b 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -99,20 +99,6 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat // lock held. But the lock guards only the scheduling part. The actual // callback is called asynchronously without the lock being held. ok := ccb.serializer.Schedule(func(_ context.Context) { - // If the addresses specified in the update contain addresses of type - // "grpclb" and the selected LB policy is not "grpclb", these addresses - // will be filtered out and ccs will be modified with the updated - // address list. - if ccb.curBalancerName != grpclbName { - var addrs []resolver.Address - for _, addr := range ccs.ResolverState.Addresses { - if addr.Type == resolver.GRPCLB { - continue - } - addrs = append(addrs, addr) - } - ccs.ResolverState.Addresses = addrs - } errCh <- ccb.balancer.UpdateClientConnState(*ccs) }) if !ok { @@ -139,7 +125,9 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { ccb.mu.Lock() ccb.serializer.Schedule(func(_ context.Context) { - ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + // Even though it is optional for balancers, gracefulswitch ensures + // opts.StateListener is set, so this cannot ever be nil. + sc.(*acBalancerWrapper).stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) }) ccb.mu.Unlock() } @@ -221,7 +209,7 @@ func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { } ccb.mode = m - done := ccb.serializer.Done + done := ccb.serializer.Done() b := ccb.balancer ok := ccb.serializer.Schedule(func(_ context.Context) { // Close the serializer to ensure that no more calls from gRPC are sent @@ -238,11 +226,9 @@ func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { } ccb.mu.Unlock() - // Give enqueued callbacks a chance to finish. + // Give enqueued callbacks a chance to finish before closing the balancer. 
<-done - // Spawn a goroutine to close the balancer (since it may block trying to - // cleanup all allocated resources) and return early. - go b.Close() + b.Close() } // exitIdleMode is invoked by grpc when the channel exits idle mode either @@ -314,29 +300,19 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } - acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} + acbw := &acBalancerWrapper{ + ccb: ccb, + ac: ac, + producers: make(map[balancer.ProducerBuilder]*refCountedProducer), + stateListener: opts.StateListener, + } ac.acbw = acbw return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - if ccb.isIdleOrClosed() { - // It it safe to ignore this call when the balancer is closed or in idle - // because the ClientConn takes care of closing the connections. - // - // Not returning early from here when the balancer is closed or in idle - // leads to a deadlock though, because of the following sequence of - // calls when holding cc.mu: - // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> - // ccb.RemoveAddrConn --> cc.removeAddrConn - return - } - - acbw, ok := sc.(*acBalancerWrapper) - if !ok { - return - } - ccb.cc.removeAddrConn(acbw.ac, errConnDrain) + // The graceful switch balancer will never call this. + logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { @@ -380,7 +356,9 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. type acBalancerWrapper struct { - ac *addrConn // read-only + ac *addrConn // read-only + ccb *ccBalancerWrapper // read-only + stateListener func(balancer.SubConnState) mu sync.Mutex producers map[balancer.ProducerBuilder]*refCountedProducer @@ -398,6 +376,23 @@ func (acbw *acBalancerWrapper) Connect() { go acbw.ac.connect() } +func (acbw *acBalancerWrapper) Shutdown() { + ccb := acbw.ccb + if ccb.isIdleOrClosed() { + // It it safe to ignore this call when the balancer is closed or in idle + // because the ClientConn takes care of closing the connections. + // + // Not returning early from here when the balancer is closed or in idle + // leads to a deadlock though, because of the following sequence of + // calls when holding cc.mu: + // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> + // ccb.RemoveAddrConn --> cc.removeAddrConn + return + } + + ccb.cc.removeAddrConn(acbw.ac, errConnDrain) +} + // NewStream begins a streaming RPC on the addrConn. If the addrConn is not // ready, blocks until it is or ctx expires. Returns an error when the context // expires or the addrConn is shut down. @@ -411,7 +406,7 @@ func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, // Invoke performs a unary RPC. If the addrConn is not ready, returns // errSubConnNotReady. -func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error { cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) 
if err != nil { return err diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index ec2c2fa14..595480112 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/binlog/v1/binarylog.proto diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index e6a1dc5d7..788c89c16 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -26,12 +26,7 @@ import ( // received. This is typically called by generated code. // // All errors returned by Invoke are compatible with the status package. -func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { - if err := cc.idlenessMgr.onCallBegin(); err != nil { - return err - } - defer cc.idlenessMgr.onCallEnd() - +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error { // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -61,13 +56,13 @@ func combine(o1 []CallOption, o2 []CallOption) []CallOption { // received. This is typically called by generated code. // // DEPRECATED: Use ClientConn.Invoke instead. -func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error { return cc.Invoke(ctx, method, args, reply, opts...) } var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} -func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) 
if err != nil { return err diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index bfd7555a8..429c389e4 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -34,9 +34,11 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/idle" "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" @@ -54,8 +56,6 @@ import ( const ( // minimum time to give a connection to complete minConnectTimeout = 20 * time.Second - // must match grpclbName in grpclb/grpclb.go - grpclbName = "grpclb" ) var ( @@ -138,7 +138,6 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, - csMgr: &connectivityStateManager{}, conns: make(map[*addrConn]struct{}), dopts: defaultDialOptions(), czData: new(channelzData), @@ -191,6 +190,8 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * // Register ClientConn with channelz. cc.channelzRegistration(target) + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) + if err := cc.validateTransportCredentials(); err != nil { return nil, err } @@ -266,7 +267,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * // Configure idleness support with configured idle timeout or default idle // timeout duration. Idleness can be explicitly disabled by the user, by // setting the dial option to 0. - cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout) + cc.idlenessMgr = idle.NewManager(idle.ManagerOptions{Enforcer: (*idler)(cc), Timeout: cc.dopts.idleTimeout, Logger: logger}) // Return early for non-blocking dials. if !cc.dopts.block { @@ -317,6 +318,16 @@ func (cc *ClientConn) addTraceEvent(msg string) { channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) } +type idler ClientConn + +func (i *idler) EnterIdleMode() error { + return (*ClientConn)(i).enterIdleMode() +} + +func (i *idler) ExitIdleMode() error { + return (*ClientConn)(i).exitIdleMode() +} + // exitIdleMode moves the channel out of idle mode by recreating the name // resolver and load balancer. func (cc *ClientConn) exitIdleMode() error { @@ -326,8 +337,8 @@ func (cc *ClientConn) exitIdleMode() error { return errConnClosing } if cc.idlenessState != ccIdlenessStateIdle { + channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState) cc.mu.Unlock() - logger.Info("ClientConn asked to exit idle mode when not in idle mode") return nil } @@ -350,7 +361,7 @@ func (cc *ClientConn) exitIdleMode() error { cc.idlenessState = ccIdlenessStateExitingIdle exitedIdle := false if cc.blockingpicker == nil { - cc.blockingpicker = newPickerWrapper() + cc.blockingpicker = newPickerWrapper(cc.dopts.copts.StatsHandlers) } else { cc.blockingpicker.exitIdleMode() exitedIdle = true @@ -393,12 +404,13 @@ func (cc *ClientConn) exitIdleMode() error { // name resolver, load balancer and any subchannels. 
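The idleness manager created above is driven by the dial-configured idle timeout, with 0 disabling idleness entirely. A client-side sketch, assuming the experimental grpc.WithIdleTimeout dial option is available in the vendored release:

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Assumes grpc.WithIdleTimeout is available (experimental dial option).
	// After 5 minutes with no RPCs the channel enters idle mode, releasing the
	// name resolver, balancer and subchannels; passing 0 disables the feature.
	cc, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithIdleTimeout(5*time.Minute),
	)
	if err != nil {
		log.Fatalf("grpc.Dial: %v", err)
	}
	defer cc.Close()
}
```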
func (cc *ClientConn) enterIdleMode() error { cc.mu.Lock() + defer cc.mu.Unlock() + if cc.conns == nil { - cc.mu.Unlock() return ErrClientConnClosing } if cc.idlenessState != ccIdlenessStateActive { - logger.Error("ClientConn asked to enter idle mode when not active") + channelz.Warningf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState) return nil } @@ -419,14 +431,14 @@ func (cc *ClientConn) enterIdleMode() error { cc.balancerWrapper.enterIdleMode() cc.csMgr.updateState(connectivity.Idle) cc.idlenessState = ccIdlenessStateIdle - cc.mu.Unlock() + cc.addTraceEvent("entering idle mode") go func() { - cc.addTraceEvent("entering idle mode") for ac := range conns { ac.tearDown(errConnIdling) } }() + return nil } @@ -475,7 +487,6 @@ func (cc *ClientConn) validateTransportCredentials() error { func (cc *ClientConn) channelzRegistration(target string) { cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) cc.addTraceEvent("created") - cc.csMgr.channelzID = cc.channelzID } // chainUnaryClientInterceptors chains all unary client interceptors into one. @@ -492,7 +503,7 @@ func chainUnaryClientInterceptors(cc *ClientConn) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { + chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) } } @@ -504,7 +515,7 @@ func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, final if curr == len(interceptors)-1 { return finalInvoker } - return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) } } @@ -540,13 +551,27 @@ func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStr } } +// newConnectivityStateManager creates an connectivityStateManager with +// the specified id. +func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager { + return &connectivityStateManager{ + channelzID: id, + pubSub: grpcsync.NewPubSub(ctx), + } +} + // connectivityStateManager keeps the connectivity.State of ClientConn. // This struct will eventually be exported so the balancers can access it. +// +// TODO: If possible, get rid of the `connectivityStateManager` type, and +// provide this functionality using the `PubSub`, to avoid keeping track of +// the connectivity state at two places. type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} channelzID *channelz.Identifier + pubSub *grpcsync.PubSub } // updateState updates the connectivity.State of ClientConn. @@ -562,6 +587,8 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state + csm.pubSub.Publish(state) + channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. 
@@ -591,7 +618,7 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { type ClientConnInterface interface { // Invoke performs a unary RPC and returns after the response is received // into reply. - Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error + Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error // NewStream begins a streaming RPC. NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) } @@ -623,7 +650,7 @@ type ClientConn struct { channelzID *channelz.Identifier // Channelz identifier for the channel. resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. - idlenessMgr idlenessManager + idlenessMgr idle.Manager // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -669,6 +696,19 @@ const ( ccIdlenessStateExitingIdle ) +func (s ccIdlenessState) String() string { + switch s { + case ccIdlenessStateActive: + return "active" + case ccIdlenessStateIdle: + return "idle" + case ccIdlenessStateExitingIdle: + return "exitingIdle" + default: + return "unknown" + } +} + // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // @@ -760,6 +800,16 @@ func init() { panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) } emptyServiceConfig = cfg.Config.(*ServiceConfig) + + internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { + return cc.csMgr.pubSub.Subscribe(s) + } + internal.EnterIdleModeForTesting = func(cc *ClientConn) error { + return cc.enterIdleMode() + } + internal.ExitIdleModeForTesting = func(cc *ClientConn) error { + return cc.exitIdleMode() + } } func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { @@ -1047,8 +1097,8 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.cancel() ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) - // We have to defer here because GracefulClose => Close => onClose, which - // requires locking ac.mu. + // We have to defer here because GracefulClose => onClose, which requires + // locking ac.mu. if ac.transport != nil { defer ac.transport.GracefulClose() ac.transport = nil @@ -1153,23 +1203,13 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel } var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) { + // No service config or no LB policy specified in config. + newBalancerName = PickFirstBalancerName + } else if cc.sc.lbConfig != nil { newBalancerName = cc.sc.lbConfig.name - } else { - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName - } + } else { // cc.sc.LB != nil + newBalancerName = *cc.sc.LB } cc.balancerWrapper.switchTo(newBalancerName) } @@ -1208,7 +1248,10 @@ func (cc *ClientConn) ResetConnectBackoff() { // Close tears down the ClientConn and all underlying connections. 
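The clientconn.go hunks above route connectivity-state updates through an internal PubSub and add idle-mode plumbing. A hedged sketch of observing those state transitions from application code through the public API (placeholder target, insecure credentials for brevity):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	conn.Connect() // kick the channel out of IDLE so state changes are observable

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Block until the channel reaches READY or the context expires; the
	// transitions reported here are the ones the reworked
	// connectivityStateManager publishes internally.
	state := conn.GetState()
	for state != connectivity.Ready && conn.WaitForStateChange(ctx, state) {
		state = conn.GetState()
		log.Printf("channel state: %v", state)
	}
}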
func (cc *ClientConn) Close() error { - defer cc.cancel() + defer func() { + cc.cancel() + <-cc.csMgr.pubSub.Done() + }() cc.mu.Lock() if cc.conns == nil { @@ -1242,7 +1285,7 @@ func (cc *ClientConn) Close() error { rWrapper.close() } if idlenessMgr != nil { - idlenessMgr.close() + idlenessMgr.Close() } for ac := range conns { @@ -1352,12 +1395,14 @@ func (ac *addrConn) resetTransport() { if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { ac.cc.resolveNow(resolver.ResolveNowOptions{}) - // After exhausting all addresses, the addrConn enters - // TRANSIENT_FAILURE. + ac.mu.Lock() if acCtx.Err() != nil { + // addrConn was torn down. + ac.mu.Unlock() return } - ac.mu.Lock() + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. ac.updateConnectivityState(connectivity.TransientFailure, err) // Backoff. @@ -1553,7 +1598,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { // Set up the health check helper functions. currentTr := ac.transport - newStream := func(method string) (interface{}, error) { + newStream := func(method string) (any, error) { ac.mu.Lock() if ac.transport != currentTr { ac.mu.Unlock() @@ -1641,16 +1686,7 @@ func (ac *addrConn) tearDown(err error) { ac.updateConnectivityState(connectivity.Shutdown, nil) ac.cancel() ac.curAddr = resolver.Address{} - if err == errConnDrain && curTr != nil { - // GracefulClose(...) may be executed multiple times when - // i) receiving multiple GoAway frames from the server; or - // ii) there are concurrent name resolver/Balancer triggered - // address removal and GoAway. - // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. - ac.mu.Unlock() - curTr.GracefulClose() - ac.mu.Lock() - } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel deleted", Severity: channelz.CtInfo, @@ -1664,6 +1700,29 @@ func (ac *addrConn) tearDown(err error) { // being deleted right away. channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() + + // We have to release the lock before the call to GracefulClose/Close here + // because both of them call onClose(), which requires locking ac.mu. + if curTr != nil { + if err == errConnDrain { + // Close the transport gracefully when the subConn is being shutdown. + // + // GracefulClose() may be executed multiple times if: + // - multiple GoAway frames are received from the server + // - there are concurrent name resolver or balancer triggered + // address removal and GoAway + curTr.GracefulClose() + } else { + // Hard close the transport when the channel is entering idle or is + // being shutdown. In the case where the channel is being shutdown, + // closing of transports is also taken care of by cancelation of cc.ctx. + // But in the case where the channel is entering idle, we need to + // explicitly close the transports here. Instead of distinguishing + // between these two cases, it is simpler to close the transport + // unconditionally here. + curTr.Close(err) + } + } } func (ac *addrConn) getState() connectivity.State { diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go index 129776547..411e3dfd4 100644 --- a/vendor/google.golang.org/grpc/codec.go +++ b/vendor/google.golang.org/grpc/codec.go @@ -27,8 +27,8 @@ import ( // omits the name/string, which vary between the two and are not needed for // anything besides the registry in the encoding package. 
type baseCodec interface { - Marshal(v interface{}) ([]byte, error) - Unmarshal(data []byte, v interface{}) error + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error } var _ baseCodec = Codec(nil) @@ -41,9 +41,9 @@ var _ baseCodec = encoding.Codec(nil) // Deprecated: use encoding.Codec instead. type Codec interface { // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) + Marshal(v any) ([]byte, error) // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error + Unmarshal(data []byte, v any) error // String returns the name of the Codec implementation. This is unused by // gRPC. String() string diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index 83e3bae37..c7cf1810a 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/gcp/altscontext.proto diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 0b0093328..81d0f1140 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/gcp/handshaker.proto diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index c2e564c7d..69f094758 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/gcp/transport_security_common.proto diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 23ea95237..cfc9fd85e 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -139,6 +139,20 @@ func newJoinDialOption(opts ...DialOption) DialOption { return &joinDialOption{opts: opts} } +// WithSharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithSharedWriteBuffer(val bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.SharedWriteBuffer = val + }) +} + // WithWriteBufferSize determines how much data can be batched before doing a // write on the wire. 
The corresponding memory allocation for this buffer will // be twice the size to keep syscalls low. The default value for this buffer is @@ -630,6 +644,7 @@ func defaultDialOptions() dialOptions { UseProxy: true, }, recvBufferPool: nopBufferPool{}, + idleTimeout: 30 * time.Minute, } } @@ -666,8 +681,8 @@ func WithResolvers(rs ...resolver.Builder) DialOption { // channel will exit idle mode when the Connect() method is called or when an // RPC is initiated. // -// By default this feature is disabled, which can also be explicitly configured -// by passing zero to this function. +// A default timeout of 30 minutes will be used if this dial option is not set +// at dial time and idleness can be disabled by passing a timeout of zero. // // # Experimental // diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 07a586135..5ebf88d71 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -38,6 +38,10 @@ const Identity = "identity" // Compressor is used for compressing and decompressing when sending or // receiving messages. +// +// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`, +// gRPC will invoke it to determine the size of the buffer allocated for the +// result of decompression. A return value of -1 indicates unknown size. type Compressor interface { // Compress writes the data written to wc to w after compressing it. If an // error occurs while initializing the compressor, that error is returned @@ -51,15 +55,6 @@ type Compressor interface { // coding header. The result must be static; the result cannot change // between calls. Name() string - // If a Compressor implements - // DecompressedSize(compressedBytes []byte) int, gRPC will call it - // to determine the size of the buffer allocated for the result of decompression. - // Return -1 to indicate unknown size. - // - // Experimental - // - // Notice: This API is EXPERIMENTAL and may be changed or removed in a - // later release. } var registeredCompressor = make(map[string]Compressor) @@ -90,9 +85,9 @@ func GetCompressor(name string) Compressor { // methods can be called from concurrent goroutines. type Codec interface { // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) + Marshal(v any) ([]byte, error) // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error + Unmarshal(data []byte, v any) error // Name returns the name of the Codec implementation. The returned string // will be used as part of content type in transmission. The result must be // static; the result cannot change between calls. diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go index 3009b35af..0ee3d3bae 100644 --- a/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -37,7 +37,7 @@ func init() { // codec is a Codec implementation with protobuf. It is the default codec for gRPC. 
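For the dial-option changes above, a minimal sketch of opting into the new shared write buffer and overriding the new 30-minute default idle timeout (placeholder target; a timeout of zero disables idleness entirely):

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithSharedWriteBuffer(true),     // release the per-connection write buffer after each flush
		grpc.WithIdleTimeout(10*time.Minute), // override the new 30-minute default; 0 disables idleness
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}

Calling conn.Connect() or issuing an RPC brings an idle channel back up, which corresponds to the exitIdleMode path added in the clientconn.go hunks above.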
type codec struct{} -func (codec) Marshal(v interface{}) ([]byte, error) { +func (codec) Marshal(v any) ([]byte, error) { vv, ok := v.(proto.Message) if !ok { return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) @@ -45,7 +45,7 @@ func (codec) Marshal(v interface{}) ([]byte, error) { return proto.Marshal(vv) } -func (codec) Unmarshal(data []byte, v interface{}) error { +func (codec) Unmarshal(data []byte, v any) error { vv, ok := v.(proto.Message) if !ok { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go index 8358dd6e2..ac73c9ced 100644 --- a/vendor/google.golang.org/grpc/grpclog/component.go +++ b/vendor/google.golang.org/grpc/grpclog/component.go @@ -31,71 +31,71 @@ type componentData struct { var cache = map[string]*componentData{} -func (c *componentData) InfoDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) InfoDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.InfoDepth(depth+1, args...) } -func (c *componentData) WarningDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) WarningDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.WarningDepth(depth+1, args...) } -func (c *componentData) ErrorDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) ErrorDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.ErrorDepth(depth+1, args...) } -func (c *componentData) FatalDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) FatalDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.FatalDepth(depth+1, args...) } -func (c *componentData) Info(args ...interface{}) { +func (c *componentData) Info(args ...any) { c.InfoDepth(1, args...) } -func (c *componentData) Warning(args ...interface{}) { +func (c *componentData) Warning(args ...any) { c.WarningDepth(1, args...) } -func (c *componentData) Error(args ...interface{}) { +func (c *componentData) Error(args ...any) { c.ErrorDepth(1, args...) } -func (c *componentData) Fatal(args ...interface{}) { +func (c *componentData) Fatal(args ...any) { c.FatalDepth(1, args...) } -func (c *componentData) Infof(format string, args ...interface{}) { +func (c *componentData) Infof(format string, args ...any) { c.InfoDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Warningf(format string, args ...interface{}) { +func (c *componentData) Warningf(format string, args ...any) { c.WarningDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Errorf(format string, args ...interface{}) { +func (c *componentData) Errorf(format string, args ...any) { c.ErrorDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Fatalf(format string, args ...interface{}) { +func (c *componentData) Fatalf(format string, args ...any) { c.FatalDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Infoln(args ...interface{}) { +func (c *componentData) Infoln(args ...any) { c.InfoDepth(1, args...) 
} -func (c *componentData) Warningln(args ...interface{}) { +func (c *componentData) Warningln(args ...any) { c.WarningDepth(1, args...) } -func (c *componentData) Errorln(args ...interface{}) { +func (c *componentData) Errorln(args ...any) { c.ErrorDepth(1, args...) } -func (c *componentData) Fatalln(args ...interface{}) { +func (c *componentData) Fatalln(args ...any) { c.FatalDepth(1, args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go index c8bb2be34..16928c9cb 100644 --- a/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -42,53 +42,53 @@ func V(l int) bool { } // Info logs to the INFO log. -func Info(args ...interface{}) { +func Info(args ...any) { grpclog.Logger.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. -func Infof(format string, args ...interface{}) { +func Infof(format string, args ...any) { grpclog.Logger.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. -func Infoln(args ...interface{}) { +func Infoln(args ...any) { grpclog.Logger.Infoln(args...) } // Warning logs to the WARNING log. -func Warning(args ...interface{}) { +func Warning(args ...any) { grpclog.Logger.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. -func Warningf(format string, args ...interface{}) { +func Warningf(format string, args ...any) { grpclog.Logger.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. -func Warningln(args ...interface{}) { +func Warningln(args ...any) { grpclog.Logger.Warningln(args...) } // Error logs to the ERROR log. -func Error(args ...interface{}) { +func Error(args ...any) { grpclog.Logger.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. -func Errorf(format string, args ...interface{}) { +func Errorf(format string, args ...any) { grpclog.Logger.Errorf(format, args...) } // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. -func Errorln(args ...interface{}) { +func Errorln(args ...any) { grpclog.Logger.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. -func Fatal(args ...interface{}) { +func Fatal(args ...any) { grpclog.Logger.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) @@ -96,7 +96,7 @@ func Fatal(args ...interface{}) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. -func Fatalf(format string, args ...interface{}) { +func Fatalf(format string, args ...any) { grpclog.Logger.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) @@ -104,7 +104,7 @@ func Fatalf(format string, args ...interface{}) { // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. // It calle os.Exit()) with exit code 1. -func Fatalln(args ...interface{}) { +func Fatalln(args ...any) { grpclog.Logger.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) @@ -113,20 +113,20 @@ func Fatalln(args ...interface{}) { // Print prints to the logger. Arguments are handled in the manner of fmt.Print. // // Deprecated: use Info. -func Print(args ...interface{}) { +func Print(args ...any) { grpclog.Logger.Info(args...) 
} // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. -func Printf(format string, args ...interface{}) { +func Printf(format string, args ...any) { grpclog.Logger.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. -func Println(args ...interface{}) { +func Println(args ...any) { grpclog.Logger.Infoln(args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go index ef06a4822..b1674d826 100644 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -24,12 +24,12 @@ import "google.golang.org/grpc/internal/grpclog" // // Deprecated: use LoggerV2. type Logger interface { - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Fatalln(args ...interface{}) - Print(args ...interface{}) - Printf(format string, args ...interface{}) - Println(args ...interface{}) + Fatal(args ...any) + Fatalf(format string, args ...any) + Fatalln(args ...any) + Print(args ...any) + Printf(format string, args ...any) + Println(args ...any) } // SetLogger sets the logger that is used in grpc. Call only from @@ -45,39 +45,39 @@ type loggerWrapper struct { Logger } -func (g *loggerWrapper) Info(args ...interface{}) { +func (g *loggerWrapper) Info(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Infoln(args ...interface{}) { +func (g *loggerWrapper) Infoln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Infof(format string, args ...interface{}) { +func (g *loggerWrapper) Infof(format string, args ...any) { g.Logger.Printf(format, args...) } -func (g *loggerWrapper) Warning(args ...interface{}) { +func (g *loggerWrapper) Warning(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Warningln(args ...interface{}) { +func (g *loggerWrapper) Warningln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Warningf(format string, args ...interface{}) { +func (g *loggerWrapper) Warningf(format string, args ...any) { g.Logger.Printf(format, args...) } -func (g *loggerWrapper) Error(args ...interface{}) { +func (g *loggerWrapper) Error(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Errorln(args ...interface{}) { +func (g *loggerWrapper) Errorln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Errorf(format string, args ...interface{}) { +func (g *loggerWrapper) Errorf(format string, args ...any) { g.Logger.Printf(format, args...) } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 5de66e40d..ecfd36d71 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -33,35 +33,35 @@ import ( // LoggerV2 does underlying logging work for grpclog. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) + Info(args ...any) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) + Infoln(args ...any) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) + Infof(format string, args ...any) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. 
- Warning(args ...interface{}) + Warning(args ...any) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) + Warningln(args ...any) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) + Warningf(format string, args ...any) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) + Error(args ...any) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) + Errorln(args ...any) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) + Fatal(args ...any) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) + Fatalln(args ...any) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) // V reports whether verbosity level l is at least the requested verbose level. V(l int) bool } @@ -182,53 +182,53 @@ func (g *loggerT) output(severity int, s string) { g.m[severity].Output(2, string(b)) } -func (g *loggerT) Info(args ...interface{}) { +func (g *loggerT) Info(args ...any) { g.output(infoLog, fmt.Sprint(args...)) } -func (g *loggerT) Infoln(args ...interface{}) { +func (g *loggerT) Infoln(args ...any) { g.output(infoLog, fmt.Sprintln(args...)) } -func (g *loggerT) Infof(format string, args ...interface{}) { +func (g *loggerT) Infof(format string, args ...any) { g.output(infoLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Warning(args ...interface{}) { +func (g *loggerT) Warning(args ...any) { g.output(warningLog, fmt.Sprint(args...)) } -func (g *loggerT) Warningln(args ...interface{}) { +func (g *loggerT) Warningln(args ...any) { g.output(warningLog, fmt.Sprintln(args...)) } -func (g *loggerT) Warningf(format string, args ...interface{}) { +func (g *loggerT) Warningf(format string, args ...any) { g.output(warningLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Error(args ...interface{}) { +func (g *loggerT) Error(args ...any) { g.output(errorLog, fmt.Sprint(args...)) } -func (g *loggerT) Errorln(args ...interface{}) { +func (g *loggerT) Errorln(args ...any) { g.output(errorLog, fmt.Sprintln(args...)) } -func (g *loggerT) Errorf(format string, args ...interface{}) { +func (g *loggerT) Errorf(format string, args ...any) { g.output(errorLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Fatal(args ...interface{}) { +func (g *loggerT) Fatal(args ...any) { g.output(fatalLog, fmt.Sprint(args...)) os.Exit(1) } -func (g *loggerT) Fatalln(args ...interface{}) { +func (g *loggerT) Fatalln(args ...any) { g.output(fatalLog, fmt.Sprintln(args...)) os.Exit(1) } -func (g *loggerT) Fatalf(format string, args ...interface{}) { +func (g *loggerT) Fatalf(format string, args ...any) { 
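The grpclog hunks above only change parameter types from ...interface{} to ...any, so existing LoggerV2 implementations keep compiling. A small sketch of installing a logger through the public grpclog API:

package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func main() {
	// Install a LoggerV2 that writes INFO to stdout and WARNING/ERROR to
	// stderr. Its methods now take `...any`, but since `any` aliases
	// interface{}, prior implementations remain valid.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stdout, os.Stderr, os.Stderr))
	grpclog.Info("custom gRPC logger installed")
}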
g.output(fatalLog, fmt.Sprintf(format, args...)) os.Exit(1) } @@ -248,11 +248,11 @@ func (g *loggerT) V(l int) bool { type DepthLoggerV2 interface { LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...interface{}) + InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...interface{}) + WarningDepth(depth int, args ...any) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...interface{}) + ErrorDepth(depth int, args ...any) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...interface{}) + FatalDepth(depth int, args ...any) } diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go index bb96ef57b..877d78fc3 100644 --- a/vendor/google.golang.org/grpc/interceptor.go +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -23,7 +23,7 @@ import ( ) // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. -type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error +type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error // UnaryClientInterceptor intercepts the execution of a unary RPC on the client. // Unary interceptors can be specified as a DialOption, using @@ -40,7 +40,7 @@ type UnaryInvoker func(ctx context.Context, method string, req, reply interface{ // defaults from the ClientConn as well as per-call options. // // The returned error must be compatible with the status package. -type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error // Streamer is called by StreamClientInterceptor to create a ClientStream. type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) @@ -66,7 +66,7 @@ type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *Cli // server side. All per-rpc information may be mutated by the interceptor. type UnaryServerInfo struct { // Server is the service implementation the user provides. This is read-only. - Server interface{} + Server any // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string } @@ -78,13 +78,13 @@ type UnaryServerInfo struct { // status package, or be one of the context errors. Otherwise, gRPC will use // codes.Unknown as the status code and err.Error() as the status message of the // RPC. -type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) +type UnaryHandler func(ctx context.Context, req any) (any, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info // contains all the information of this RPC the interceptor can operate on. And handler is the wrapper // of the service method implementation. It is the responsibility of the interceptor to invoke handler // to complete the RPC. 
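Since the interceptor signatures above now use any for req and reply, a client interceptor written against the updated types might look like the following sketch (placeholder target, logging only):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// loggingInterceptor matches the updated UnaryClientInterceptor signature,
// whose req and reply parameters are now typed as `any`.
func loggingInterceptor(ctx context.Context, method string, req, reply any,
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("rpc %s took %v err=%v", method, time.Since(start), err)
	return err
}

func main() {
	conn, err := grpc.Dial("localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithUnaryInterceptor(loggingInterceptor))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}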
-type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) +type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error) // StreamServerInfo consists of various information about a streaming RPC on // server side. All per-rpc information may be mutated by the interceptor. @@ -101,4 +101,4 @@ type StreamServerInfo struct { // info contains all the information of this RPC the interceptor can operate on. And handler is the // service method implementation. It is the responsibility of the interceptor to invoke handler to // complete the RPC. -type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error +type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index 5fc0ee3da..fed1c011a 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -23,6 +23,8 @@ package backoff import ( + "context" + "errors" "time" grpcbackoff "google.golang.org/grpc/backoff" @@ -71,3 +73,37 @@ func (bc Exponential) Backoff(retries int) time.Duration { } return time.Duration(backoff) } + +// ErrResetBackoff is the error to be returned by the function executed by RunF, +// to instruct the latter to reset its backoff state. +var ErrResetBackoff = errors.New("reset backoff state") + +// RunF provides a convenient way to run a function f repeatedly until the +// context expires or f returns a non-nil error that is not ErrResetBackoff. +// When f returns ErrResetBackoff, RunF continues to run f, but resets its +// backoff state before doing so. backoff accepts an integer representing the +// number of retries, and returns the amount of time to backoff. +func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) { + attempt := 0 + timer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-timer.C: + case <-ctx.Done(): + timer.Stop() + return + } + + err := f() + if errors.Is(err, ErrResetBackoff) { + timer.Reset(0) + attempt = 0 + continue + } + if err != nil { + return + } + timer.Reset(backoff(attempt)) + attempt++ + } +} diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index 08666f62a..3c594e6e4 100644 --- a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -200,8 +200,8 @@ func (gsb *Balancer) ExitIdle() { } } -// UpdateSubConnState forwards the update to the appropriate child. -func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { +// updateSubConnState forwards the update to the appropriate child. 
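The RunF helper added to internal/backoff above is internal to grpc-go; application code tunes the same reconnect backoff machinery through the public knobs. A sketch with placeholder values:

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff: backoff.Config{
				BaseDelay:  500 * time.Millisecond,
				Multiplier: 1.6,
				Jitter:     0.2,
				MaxDelay:   30 * time.Second,
			},
			MinConnectTimeout: 10 * time.Second,
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}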
+func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) { gsb.currentMu.Lock() defer gsb.currentMu.Unlock() gsb.mu.Lock() @@ -214,13 +214,26 @@ func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubC } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { balToUpdate = gsb.balancerPending } - gsb.mu.Unlock() if balToUpdate == nil { // SubConn belonged to a stale lb policy that has not yet fully closed, // or the balancer was already closed. + gsb.mu.Unlock() return } - balToUpdate.UpdateSubConnState(sc, state) + if state.ConnectivityState == connectivity.Shutdown { + delete(balToUpdate.subconns, sc) + } + gsb.mu.Unlock() + if cb != nil { + cb(state) + } else { + balToUpdate.UpdateSubConnState(sc, state) + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.updateSubConnState(sc, state, nil) } // Close closes any active child balancers. @@ -242,7 +255,7 @@ func (gsb *Balancer) Close() { // // It implements the balancer.ClientConn interface and is passed down in that // capacity to the wrapped balancer. It maintains a set of subConns created by -// the wrapped balancer and calls from the latter to create/update/remove +// the wrapped balancer and calls from the latter to create/update/shutdown // SubConns update this set before being forwarded to the parent ClientConn. // State updates from the wrapped balancer can result in invocation of the // graceful switch logic. @@ -254,21 +267,10 @@ type balancerWrapper struct { subconns map[balancer.SubConn]bool // subconns created by this balancer } -func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - if state.ConnectivityState == connectivity.Shutdown { - bw.gsb.mu.Lock() - delete(bw.subconns, sc) - bw.gsb.mu.Unlock() - } - // There is no need to protect this read with a mutex, as the write to the - // Balancer field happens in SwitchTo, which completes before this can be - // called. - bw.Balancer.UpdateSubConnState(sc, state) -} - -// Close closes the underlying LB policy and removes the subconns it created. bw -// must not be referenced via balancerCurrent or balancerPending in gsb when -// called. gsb.mu must not be held. Does not panic with a nil receiver. +// Close closes the underlying LB policy and shuts down the subconns it +// created. bw must not be referenced via balancerCurrent or balancerPending in +// gsb when called. gsb.mu must not be held. Does not panic with a nil +// receiver. func (bw *balancerWrapper) Close() { // before Close is called. 
if bw == nil { @@ -281,7 +283,7 @@ func (bw *balancerWrapper) Close() { bw.Balancer.Close() bw.gsb.mu.Lock() for sc := range bw.subconns { - bw.gsb.cc.RemoveSubConn(sc) + sc.Shutdown() } bw.gsb.mu.Unlock() } @@ -335,13 +337,16 @@ func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.Ne } bw.gsb.mu.Unlock() + var sc balancer.SubConn + oldListener := opts.StateListener + opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) } sc, err := bw.gsb.cc.NewSubConn(addrs, opts) if err != nil { return nil, err } bw.gsb.mu.Lock() if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call - bw.gsb.cc.RemoveSubConn(sc) + sc.Shutdown() bw.gsb.mu.Unlock() return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) } @@ -360,13 +365,9 @@ func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { } func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { - bw.gsb.mu.Lock() - if !bw.gsb.balancerCurrentOrPending(bw) { - bw.gsb.mu.Unlock() - return - } - bw.gsb.mu.Unlock() - bw.gsb.cc.RemoveSubConn(sc) + // Note: existing third party balancers may call this, so it must remain + // until RemoveSubConn is fully removed. + sc.Shutdown() } func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go index 3a905d966..94a08d687 100644 --- a/vendor/google.golang.org/grpc/internal/balancerload/load.go +++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go @@ -25,7 +25,7 @@ import ( // Parser converts loads from metadata into a concrete type. type Parser interface { // Parse parses loads from metadata. - Parse(md metadata.MD) interface{} + Parse(md metadata.MD) any } var parser Parser @@ -38,7 +38,7 @@ func SetParser(lr Parser) { } // Parse calls parser.Read(). -func Parse(md metadata.MD) interface{} { +func Parse(md metadata.MD) any { if parser == nil { return nil } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 6c3f63221..0f31274a3 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -230,7 +230,7 @@ type ClientMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. - Message interface{} + Message any } func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { @@ -270,7 +270,7 @@ type ServerMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. - Message interface{} + Message any } func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 81c2f5fd7..4399c3df4 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -28,25 +28,25 @@ import "sync" // the underlying mutex used for synchronization. // // Unbounded supports values of any type to be stored in it by using a channel -// of `interface{}`. This means that a call to Put() incurs an extra memory -// allocation, and also that users need a type assertion while reading. 
For -// performance critical code paths, using Unbounded is strongly discouraged and -// defining a new type specific implementation of this buffer is preferred. See +// of `any`. This means that a call to Put() incurs an extra memory allocation, +// and also that users need a type assertion while reading. For performance +// critical code paths, using Unbounded is strongly discouraged and defining a +// new type specific implementation of this buffer is preferred. See // internal/transport/transport.go for an example of this. type Unbounded struct { - c chan interface{} + c chan any closed bool mu sync.Mutex - backlog []interface{} + backlog []any } // NewUnbounded returns a new instance of Unbounded. func NewUnbounded() *Unbounded { - return &Unbounded{c: make(chan interface{}, 1)} + return &Unbounded{c: make(chan any, 1)} } // Put adds t to the unbounded buffer. -func (b *Unbounded) Put(t interface{}) { +func (b *Unbounded) Put(t any) { b.mu.Lock() defer b.mu.Unlock() if b.closed { @@ -89,7 +89,7 @@ func (b *Unbounded) Load() { // // If the unbounded buffer is closed, the read channel returned by this method // is closed. -func (b *Unbounded) Get() <-chan interface{} { +func (b *Unbounded) Get() <-chan any { return b.c } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 777cbcd79..5395e7752 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,9 +24,7 @@ package channelz import ( - "context" "errors" - "fmt" "sort" "sync" "sync/atomic" @@ -40,8 +38,11 @@ const ( ) var ( - db dbWrapper - idGen idGenerator + // IDGen is the global channelz entity ID generator. It should not be used + // outside this package except by tests. + IDGen IDGenerator + + db dbWrapper // EntryPerPage defines the number of channelz entries to be shown on a web page. EntryPerPage = int64(50) curState int32 @@ -52,14 +53,14 @@ var ( func TurnOn() { if !IsOn() { db.set(newChannelMap()) - idGen.reset() + IDGen.Reset() atomic.StoreInt32(&curState, 1) } } // IsOn returns whether channelz data collection is on. func IsOn() bool { - return atomic.CompareAndSwapInt32(&curState, 1, 1) + return atomic.LoadInt32(&curState) == 1 } // SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). @@ -97,43 +98,6 @@ func (d *dbWrapper) get() *channelMap { return d.DB } -// NewChannelzStorageForTesting initializes channelz data storage and id -// generator for testing purposes. -// -// Returns a cleanup function to be invoked by the test, which waits for up to -// 10s for all channelz state to be reset by the grpc goroutines when those -// entities get closed. This cleanup function helps with ensuring that tests -// don't mess up each other. 
-func NewChannelzStorageForTesting() (cleanup func() error) { - db.set(newChannelMap()) - idGen.reset() - - return func() error { - cm := db.get() - if cm == nil { - return nil - } - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - ticker := time.NewTicker(10 * time.Millisecond) - defer ticker.Stop() - for { - cm.mu.RLock() - topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) - cm.mu.RUnlock() - - if err := ctx.Err(); err != nil { - return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) - } - if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { - return nil - } - <-ticker.C - } - } -} - // GetTopChannels returns a slice of top channel's ChannelMetric, along with a // boolean indicating whether there's more top channels to be queried for. // @@ -193,7 +157,7 @@ func GetServer(id int64) *ServerMetric { // // If channelz is not turned ON, the channelz database is not mutated. func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { - id := idGen.genID() + id := IDGen.genID() var parent int64 isTopChannel := true if pid != nil { @@ -229,7 +193,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er if pid == nil { return nil, errors.New("a SubChannel's parent id cannot be nil") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefSubChannel, id, pid), nil } @@ -251,7 +215,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er // // If channelz is not turned ON, the channelz database is not mutated. func RegisterServer(s Server, ref string) *Identifier { - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefServer, id, nil) } @@ -277,7 +241,7 @@ func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, e if pid == nil { return nil, errors.New("a ListenSocket's parent id cannot be 0") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefListenSocket, id, pid), nil } @@ -297,7 +261,7 @@ func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, e if pid == nil { return nil, errors.New("a NormalSocket's parent id cannot be 0") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefNormalSocket, id, pid), nil } @@ -776,14 +740,17 @@ func (c *channelMap) GetServer(id int64) *ServerMetric { return sm } -type idGenerator struct { +// IDGenerator is an incrementing atomic that tracks IDs for channelz entities. +type IDGenerator struct { id int64 } -func (i *idGenerator) reset() { +// Reset resets the generated ID back to zero. Should only be used at +// initialization or by tests sensitive to the ID number. 
+func (i *IDGenerator) Reset() { atomic.StoreInt64(&i.id, 0) } -func (i *idGenerator) genID() int64 { +func (i *IDGenerator) genID() int64 { return atomic.AddInt64(&i.id, 1) } diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go index 8e13a3d2c..f89e6f77b 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -31,7 +31,7 @@ func withParens(id *Identifier) string { } // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtInfo, @@ -39,7 +39,7 @@ func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtInfo, @@ -47,7 +47,7 @@ func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...inter } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtWarning, @@ -55,7 +55,7 @@ func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Warningf logs and adds a trace event if channelz is on. -func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtWarning, @@ -63,7 +63,7 @@ func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...in } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtError, @@ -71,7 +71,7 @@ func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Errorf logs and adds a trace event if channelz is on. 
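The channelz hunks above rework the internal entity ID generator and trace bookkeeping; those entities only become visible to external tooling once the channelz query service is registered on a server. A hedged sketch (placeholder listen address) using the public channelz service package:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	channelzservice "google.golang.org/grpc/channelz/service"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:50052") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	// Expose the channelz query service so the channels, subchannels and
	// sockets registered by the internal channelz package can be inspected
	// with tools such as grpcdebug.
	channelzservice.RegisterChannelzServiceToServer(s)
	log.Fatal(s.Serve(lis))
}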
-func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtError, diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index 7b2f350e2..1d4020f53 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -628,6 +628,7 @@ type tracedChannel interface { type channelTrace struct { cm *channelMap + clearCalled bool createdTime time.Time eventCount int64 mu sync.Mutex @@ -656,6 +657,10 @@ func (c *channelTrace) append(e *TraceEvent) { } func (c *channelTrace) clear() { + if c.clearCalled { + return + } + c.clearCalled = true c.mu.Lock() for _, e := range c.events { if e.RefID != 0 { diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go index 8d194e44e..98288c3f8 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -23,7 +23,7 @@ import ( ) // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(socket interface{}) *SocketOptionData { +func GetSocketOption(socket any) *SocketOptionData { c, ok := socket.(syscall.Conn) if !ok { return nil diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go index 837ddc402..b5568b22e 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go @@ -22,6 +22,6 @@ package channelz // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c interface{}) *SocketOptionData { +func GetSocketOption(c any) *SocketOptionData { return nil } diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go index 32c9b5903..9deee7f65 100644 --- a/vendor/google.golang.org/grpc/internal/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -25,12 +25,12 @@ import ( type requestInfoKey struct{} // NewRequestInfoContext creates a context with ri. -func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { +func NewRequestInfoContext(ctx context.Context, ri any) context.Context { return context.WithValue(ctx, requestInfoKey{}, ri) } // RequestInfoFromContext extracts the RequestInfo from ctx. -func RequestInfoFromContext(ctx context.Context) interface{} { +func RequestInfoFromContext(ctx context.Context) any { return ctx.Value(requestInfoKey{}) } @@ -39,11 +39,11 @@ func RequestInfoFromContext(ctx context.Context) interface{} { type clientHandshakeInfoKey struct{} // ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. -func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { +func ClientHandshakeInfoFromContext(ctx context.Context) any { return ctx.Value(clientHandshakeInfoKey{}) } // NewClientHandshakeInfoContext creates a context with chi. 
-func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { +func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context { return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) } diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 77c2c0b89..3cf10ddfb 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -37,9 +37,12 @@ var ( // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) // PickFirstLBConfig is set if we should support configuration of the - // pick_first LB policy, which can be enabled by setting the environment - // variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true". - PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false) + // pick_first LB policy. + PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", true) + // LeastRequestLB is set if we should support the least_request_experimental + // LB policy, which can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". + LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS // handshakes that can be performed. ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index b68e26a36..bfc45102a 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -30,7 +30,7 @@ var Logger LoggerV2 var DepthLogger DepthLoggerV2 // InfoDepth logs to the INFO log at the specified depth. -func InfoDepth(depth int, args ...interface{}) { +func InfoDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.InfoDepth(depth, args...) } else { @@ -39,7 +39,7 @@ func InfoDepth(depth int, args ...interface{}) { } // WarningDepth logs to the WARNING log at the specified depth. -func WarningDepth(depth int, args ...interface{}) { +func WarningDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.WarningDepth(depth, args...) } else { @@ -48,7 +48,7 @@ func WarningDepth(depth int, args ...interface{}) { } // ErrorDepth logs to the ERROR log at the specified depth. -func ErrorDepth(depth int, args ...interface{}) { +func ErrorDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.ErrorDepth(depth, args...) } else { @@ -57,7 +57,7 @@ func ErrorDepth(depth int, args ...interface{}) { } // FatalDepth logs to the FATAL log at the specified depth. -func FatalDepth(depth int, args ...interface{}) { +func FatalDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.FatalDepth(depth, args...) } else { @@ -71,35 +71,35 @@ func FatalDepth(depth int, args ...interface{}) { // is defined here to avoid a circular dependency. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) + Info(args ...any) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) + Infoln(args ...any) // Infof logs to INFO log. 
Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) + Infof(format string, args ...any) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...interface{}) + Warning(args ...any) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) + Warningln(args ...any) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) + Warningf(format string, args ...any) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) + Error(args ...any) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) + Errorln(args ...any) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) + Fatal(args ...any) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) + Fatalln(args ...any) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) // V reports whether verbosity level l is at least the requested verbose level. V(l int) bool } @@ -116,11 +116,11 @@ type LoggerV2 interface { // later release. type DepthLoggerV2 interface { // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...interface{}) + InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...interface{}) + WarningDepth(depth int, args ...any) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...interface{}) + ErrorDepth(depth int, args ...any) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...interface{}) + FatalDepth(depth int, args ...any) } diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go index 02224b42c..faa998de7 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -31,7 +31,7 @@ type PrefixLogger struct { } // Infof does info logging. -func (pl *PrefixLogger) Infof(format string, args ...interface{}) { +func (pl *PrefixLogger) Infof(format string, args ...any) { if pl != nil { // Handle nil, so the tests can pass in a nil logger. format = pl.prefix + format @@ -42,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...interface{}) { } // Warningf does warning logging. 
-func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { +func (pl *PrefixLogger) Warningf(format string, args ...any) { if pl != nil { format = pl.prefix + format pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) @@ -52,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { } // Errorf does error logging. -func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { +func (pl *PrefixLogger) Errorf(format string, args ...any) { if pl != nil { format = pl.prefix + format pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) @@ -62,7 +62,7 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { } // Debugf does info logging at verbose level 2. -func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { +func (pl *PrefixLogger) Debugf(format string, args ...any) { // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe // rewrite PrefixLogger a little to ensure that we don't use the global // `Logger` here, and instead use the `logger` field. diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 37b8d4117..900917dbe 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -32,10 +32,10 @@ import ( // // This type is safe for concurrent access. type CallbackSerializer struct { - // Done is closed once the serializer is shut down completely, i.e all + // done is closed once the serializer is shut down completely, i.e all // scheduled callbacks are executed and the serializer has deallocated all // its resources. - Done chan struct{} + done chan struct{} callbacks *buffer.Unbounded closedMu sync.Mutex @@ -48,12 +48,12 @@ type CallbackSerializer struct { // callbacks will be added once this context is canceled, and any pending un-run // callbacks will be executed before the serializer is shut down. func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { - t := &CallbackSerializer{ - Done: make(chan struct{}), + cs := &CallbackSerializer{ + done: make(chan struct{}), callbacks: buffer.NewUnbounded(), } - go t.run(ctx) - return t + go cs.run(ctx) + return cs } // Schedule adds a callback to be scheduled after existing callbacks are run. @@ -64,56 +64,62 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { // Return value indicates if the callback was successfully added to the list of // callbacks to be executed by the serializer. It is not possible to add // callbacks once the context passed to NewCallbackSerializer is cancelled. -func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - t.closedMu.Lock() - defer t.closedMu.Unlock() +func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { + cs.closedMu.Lock() + defer cs.closedMu.Unlock() - if t.closed { + if cs.closed { return false } - t.callbacks.Put(f) + cs.callbacks.Put(f) return true } -func (t *CallbackSerializer) run(ctx context.Context) { +func (cs *CallbackSerializer) run(ctx context.Context) { var backlog []func(context.Context) - defer close(t.Done) + defer close(cs.done) for ctx.Err() == nil { select { case <-ctx.Done(): // Do nothing here. Next iteration of the for loop will not happen, // since ctx.Err() would be non-nil. 
- case callback, ok := <-t.callbacks.Get(): + case callback, ok := <-cs.callbacks.Get(): if !ok { return } - t.callbacks.Load() + cs.callbacks.Load() callback.(func(ctx context.Context))(ctx) } } // Fetch pending callbacks if any, and execute them before returning from - // this method and closing t.Done. - t.closedMu.Lock() - t.closed = true - backlog = t.fetchPendingCallbacks() - t.callbacks.Close() - t.closedMu.Unlock() + // this method and closing cs.done. + cs.closedMu.Lock() + cs.closed = true + backlog = cs.fetchPendingCallbacks() + cs.callbacks.Close() + cs.closedMu.Unlock() for _, b := range backlog { b(ctx) } } -func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { +func (cs *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { var backlog []func(context.Context) for { select { - case b := <-t.callbacks.Get(): + case b := <-cs.callbacks.Get(): backlog = append(backlog, b.(func(context.Context))) - t.callbacks.Load() + cs.callbacks.Load() default: return backlog } } } + +// Done returns a channel that is closed after the context passed to +// NewCallbackSerializer is canceled and all callbacks have been executed. +func (cs *CallbackSerializer) Done() <-chan struct{} { + return cs.done +} diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go index f58b5ffa6..aef8cec1a 100644 --- a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go +++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -29,7 +29,7 @@ import ( type Subscriber interface { // OnMessage is invoked when a new message is published. Implementations // must not block in this method. - OnMessage(msg interface{}) + OnMessage(msg any) } // PubSub is a simple one-to-many publish-subscribe system that supports @@ -40,25 +40,23 @@ type Subscriber interface { // subscribers interested in receiving these messages register a callback // via the Subscribe() method. // -// Once a PubSub is stopped, no more messages can be published, and -// it is guaranteed that no more subscriber callback will be invoked. +// Once a PubSub is stopped, no more messages can be published, but any pending +// published messages will be delivered to the subscribers. Done may be used +// to determine when all published messages have been delivered. type PubSub struct { - cs *CallbackSerializer - cancel context.CancelFunc + cs *CallbackSerializer // Access to the below fields are guarded by this mutex. mu sync.Mutex - msg interface{} + msg any subscribers map[Subscriber]bool - stopped bool } -// NewPubSub returns a new PubSub instance. -func NewPubSub() *PubSub { - ctx, cancel := context.WithCancel(context.Background()) +// NewPubSub returns a new PubSub instance. Users should cancel the +// provided context to shutdown the PubSub. +func NewPubSub(ctx context.Context) *PubSub { return &PubSub{ cs: NewCallbackSerializer(ctx), - cancel: cancel, subscribers: map[Subscriber]bool{}, } } @@ -75,10 +73,6 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { ps.mu.Lock() defer ps.mu.Unlock() - if ps.stopped { - return func() {} - } - ps.subscribers[sub] = true if ps.msg != nil { @@ -102,14 +96,10 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { // Publish publishes the provided message to the PubSub, and invokes // callbacks registered by subscribers asynchronously. 
-func (ps *PubSub) Publish(msg interface{}) { +func (ps *PubSub) Publish(msg any) { ps.mu.Lock() defer ps.mu.Unlock() - if ps.stopped { - return - } - ps.msg = msg for sub := range ps.subscribers { s := sub @@ -124,13 +114,8 @@ func (ps *PubSub) Publish(msg interface{}) { } } -// Stop shuts down the PubSub and releases any resources allocated by it. -// It is guaranteed that no subscriber callbacks would be invoked once this -// method returns. -func (ps *PubSub) Stop() { - ps.mu.Lock() - defer ps.mu.Unlock() - ps.stopped = true - - ps.cancel() +// Done returns a channel that is closed after the context passed to NewPubSub +// is canceled and all updates have been sent to subscribers. +func (ps *PubSub) Done() <-chan struct{} { + return ps.cs.Done() } diff --git a/vendor/google.golang.org/grpc/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go similarity index 61% rename from vendor/google.golang.org/grpc/idle.go rename to vendor/google.golang.org/grpc/internal/idle/idle.go index dc3dc72f6..6c272476e 100644 --- a/vendor/google.golang.org/grpc/idle.go +++ b/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -16,7 +16,9 @@ * */ -package grpc +// Package idle contains a component for managing idleness (entering and exiting) +// based on RPC activity. +package idle import ( "fmt" @@ -24,6 +26,8 @@ import ( "sync" "sync/atomic" "time" + + "google.golang.org/grpc/grpclog" ) // For overriding in unit tests. @@ -31,31 +35,31 @@ var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { return time.AfterFunc(d, f) } -// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter +// Enforcer is the functionality provided by grpc.ClientConn to enter // and exit from idle mode. -type idlenessEnforcer interface { - exitIdleMode() error - enterIdleMode() error +type Enforcer interface { + ExitIdleMode() error + EnterIdleMode() error } -// idlenessManager defines the functionality required to track RPC activity on a +// Manager defines the functionality required to track RPC activity on a // channel. -type idlenessManager interface { - onCallBegin() error - onCallEnd() - close() +type Manager interface { + OnCallBegin() error + OnCallEnd() + Close() } -type noopIdlenessManager struct{} +type noopManager struct{} -func (noopIdlenessManager) onCallBegin() error { return nil } -func (noopIdlenessManager) onCallEnd() {} -func (noopIdlenessManager) close() {} +func (noopManager) OnCallBegin() error { return nil } +func (noopManager) OnCallEnd() {} +func (noopManager) Close() {} -// idlenessManagerImpl implements the idlenessManager interface. It uses atomic -// operations to synchronize access to shared state and a mutex to guarantee -// mutual exclusion in a critical section. -type idlenessManagerImpl struct { +// manager implements the Manager interface. It uses atomic operations to +// synchronize access to shared state and a mutex to guarantee mutual exclusion +// in a critical section. +type manager struct { // State accessed atomically. lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. @@ -64,14 +68,15 @@ type idlenessManagerImpl struct { // Can be accessed without atomics or mutex since these are set at creation // time and read-only after that. - enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn. - timeout int64 // Idle timeout duration nanos stored as an int64. 
+ enforcer Enforcer // Functionality provided by grpc.ClientConn. + timeout int64 // Idle timeout duration nanos stored as an int64. + logger grpclog.LoggerV2 // idleMu is used to guarantee mutual exclusion in two scenarios: // - Opposing intentions: // - a: Idle timeout has fired and handleIdleTimeout() is trying to put // the channel in idle mode because the channel has been inactive. - // - b: At the same time an RPC is made on the channel, and onCallBegin() + // - b: At the same time an RPC is made on the channel, and OnCallBegin() // is trying to prevent the channel from going idle. // - Competing intentions: // - The channel is in idle mode and there are multiple RPCs starting at @@ -83,28 +88,37 @@ type idlenessManagerImpl struct { timer *time.Timer } -// newIdlenessManager creates a new idleness manager implementation for the +// ManagerOptions is a collection of options used by +// NewManager. +type ManagerOptions struct { + Enforcer Enforcer + Timeout time.Duration + Logger grpclog.LoggerV2 +} + +// NewManager creates a new idleness manager implementation for the // given idle timeout. -func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager { - if idleTimeout == 0 { - return noopIdlenessManager{} +func NewManager(opts ManagerOptions) Manager { + if opts.Timeout == 0 { + return noopManager{} } - i := &idlenessManagerImpl{ - enforcer: enforcer, - timeout: int64(idleTimeout), + m := &manager{ + enforcer: opts.Enforcer, + timeout: int64(opts.Timeout), + logger: opts.Logger, } - i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout) - return i + m.timer = timeAfterFunc(opts.Timeout, m.handleIdleTimeout) + return m } // resetIdleTimer resets the idle timer to the given duration. This method // should only be called from the timer callback. -func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { - i.idleMu.Lock() - defer i.idleMu.Unlock() +func (m *manager) resetIdleTimer(d time.Duration) { + m.idleMu.Lock() + defer m.idleMu.Unlock() - if i.timer == nil { + if m.timer == nil { // Only close sets timer to nil. We are done. return } @@ -112,47 +126,47 @@ func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { // It is safe to ignore the return value from Reset() because this method is // only ever called from the timer callback, which means the timer has // already fired. - i.timer.Reset(d) + m.timer.Reset(d) } // handleIdleTimeout is the timer callback that is invoked upon expiry of the // configured idle timeout. The channel is considered inactive if there are no // ongoing calls and no RPC activity since the last time the timer fired. -func (i *idlenessManagerImpl) handleIdleTimeout() { - if i.isClosed() { +func (m *manager) handleIdleTimeout() { + if m.isClosed() { return } - if atomic.LoadInt32(&i.activeCallsCount) > 0 { - i.resetIdleTimer(time.Duration(i.timeout)) + if atomic.LoadInt32(&m.activeCallsCount) > 0 { + m.resetIdleTimer(time.Duration(m.timeout)) return } // There has been activity on the channel since we last got here. Reset the // timer and return. - if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { // Set the timer to fire after a duration of idle timeout, calculated // from the time the most recent RPC completed. 
- atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0) - i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano())) + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) + m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime) + m.timeout - time.Now().UnixNano())) return } // This CAS operation is extremely likely to succeed given that there has // been no activity since the last time we were here. Setting the - // activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the + // activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() that the // channel is either in idle mode or is trying to get there. - if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) { + if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { // This CAS operation can fail if an RPC started after we checked for // activity at the top of this method, or one was ongoing from before // the last time we were here. In both case, reset the timer and return. - i.resetIdleTimer(time.Duration(i.timeout)) + m.resetIdleTimer(time.Duration(m.timeout)) return } // Now that we've set the active calls count to -math.MaxInt32, it's time to // actually move to idle mode. - if i.tryEnterIdleMode() { + if m.tryEnterIdleMode() { // Successfully entered idle mode. No timer needed until we exit idle. return } @@ -160,8 +174,8 @@ func (i *idlenessManagerImpl) handleIdleTimeout() { // Failed to enter idle mode due to a concurrent RPC that kept the channel // active, or because of an error from the channel. Undo the attempt to // enter idle, and reset the timer to try again later. - atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) - i.resetIdleTimer(time.Duration(i.timeout)) + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + m.resetIdleTimer(time.Duration(m.timeout)) } // tryEnterIdleMode instructs the channel to enter idle mode. But before @@ -171,15 +185,15 @@ func (i *idlenessManagerImpl) handleIdleTimeout() { // Return value indicates whether or not the channel moved to idle mode. // // Holds idleMu which ensures mutual exclusion with exitIdleMode. -func (i *idlenessManagerImpl) tryEnterIdleMode() bool { - i.idleMu.Lock() - defer i.idleMu.Unlock() +func (m *manager) tryEnterIdleMode() bool { + m.idleMu.Lock() + defer m.idleMu.Unlock() - if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 { + if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { // We raced and lost to a new RPC. Very rare, but stop entering idle. return false } - if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { // An very short RPC could have come in (and also finished) after we // checked for calls count and activity in handleIdleTimeout(), but // before the CAS operation. So, we need to check for activity again. @@ -189,99 +203,99 @@ func (i *idlenessManagerImpl) tryEnterIdleMode() bool { // No new RPCs have come in since we last set the active calls count value // -math.MaxInt32 in the timer callback. And since we have the lock, it is // safe to enter idle mode now. - if err := i.enforcer.enterIdleMode(); err != nil { - logger.Errorf("Failed to enter idle mode: %v", err) + if err := m.enforcer.EnterIdleMode(); err != nil { + m.logger.Errorf("Failed to enter idle mode: %v", err) return false } // Successfully entered idle mode. - i.actuallyIdle = true + m.actuallyIdle = true return true } -// onCallBegin is invoked at the start of every RPC. 
-func (i *idlenessManagerImpl) onCallBegin() error { - if i.isClosed() { +// OnCallBegin is invoked at the start of every RPC. +func (m *manager) OnCallBegin() error { + if m.isClosed() { return nil } - if atomic.AddInt32(&i.activeCallsCount, 1) > 0 { + if atomic.AddInt32(&m.activeCallsCount, 1) > 0 { // Channel is not idle now. Set the activity bit and allow the call. - atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) return nil } // Channel is either in idle mode or is in the process of moving to idle // mode. Attempt to exit idle mode to allow this RPC. - if err := i.exitIdleMode(); err != nil { + if err := m.exitIdleMode(); err != nil { // Undo the increment to calls count, and return an error causing the // RPC to fail. - atomic.AddInt32(&i.activeCallsCount, -1) + atomic.AddInt32(&m.activeCallsCount, -1) return err } - atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) return nil } // exitIdleMode instructs the channel to exit idle mode. // // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. -func (i *idlenessManagerImpl) exitIdleMode() error { - i.idleMu.Lock() - defer i.idleMu.Unlock() +func (m *manager) exitIdleMode() error { + m.idleMu.Lock() + defer m.idleMu.Unlock() - if !i.actuallyIdle { + if !m.actuallyIdle { // This can happen in two scenarios: // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called // tryEnterIdleMode(). But before the latter could grab the lock, an RPC - // came in and onCallBegin() noticed that the calls count is negative. + // came in and OnCallBegin() noticed that the calls count is negative. // - Channel is in idle mode, and multiple new RPCs come in at the same - // time, all of them notice a negative calls count in onCallBegin and get + // time, all of them notice a negative calls count in OnCallBegin and get // here. The first one to get the lock would got the channel to exit idle. // // Either way, nothing to do here. return nil } - if err := i.enforcer.exitIdleMode(); err != nil { + if err := m.enforcer.ExitIdleMode(); err != nil { return fmt.Errorf("channel failed to exit idle mode: %v", err) } // Undo the idle entry process. This also respects any new RPC attempts. - atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) - i.actuallyIdle = false + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + m.actuallyIdle = false // Start a new timer to fire after the configured idle timeout. - i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout) + m.timer = timeAfterFunc(time.Duration(m.timeout), m.handleIdleTimeout) return nil } -// onCallEnd is invoked at the end of every RPC. -func (i *idlenessManagerImpl) onCallEnd() { - if i.isClosed() { +// OnCallEnd is invoked at the end of every RPC. +func (m *manager) OnCallEnd() { + if m.isClosed() { return } // Record the time at which the most recent call finished. - atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano()) + atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano()) // Decrement the active calls count. This count can temporarily go negative // when the timer callback is in the process of moving the channel to idle // mode, but one or more RPCs come in and complete before the timer callback // can get done with the process of moving to idle mode. 
- atomic.AddInt32(&i.activeCallsCount, -1) + atomic.AddInt32(&m.activeCallsCount, -1) } -func (i *idlenessManagerImpl) isClosed() bool { - return atomic.LoadInt32(&i.closed) == 1 +func (m *manager) isClosed() bool { + return atomic.LoadInt32(&m.closed) == 1 } -func (i *idlenessManagerImpl) close() { - atomic.StoreInt32(&i.closed, 1) +func (m *manager) Close() { + atomic.StoreInt32(&m.closed, 1) - i.idleMu.Lock() - i.timer.Stop() - i.timer = nil - i.idleMu.Unlock() + m.idleMu.Lock() + m.timer.Stop() + m.timer = nil + m.idleMu.Unlock() } diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 42ff39c84..0d94c63e0 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -30,7 +30,7 @@ import ( var ( // WithHealthCheckFunc is set by dialoptions.go - WithHealthCheckFunc interface{} // func (HealthChecker) DialOption + WithHealthCheckFunc any // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker // BalancerUnregister is exported by package balancer to unregister a balancer. @@ -38,8 +38,12 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second + // KeepaliveMinServerPingTime is the minimum ping interval for servers. + // This must be 1s by default, but tests may wish to set it lower for + // convenience. + KeepaliveMinServerPingTime = time.Second // ParseServiceConfig parses a JSON representation of the service config. - ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult + ParseServiceConfig any // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and // parsing. Both a and b should be returned by ParseServiceConfig. // This function compares the config without rawJSON stripped, in case the @@ -49,33 +53,33 @@ var ( // given name. This is set by package certprovider for use from xDS // bootstrap code while parsing certificate provider configs in the // bootstrap file. - GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder + GetCertificateProviderBuilder any // func(string) certprovider.Builder // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo // stored in the passed in attributes. This is set by // credentials/xds/xds.go. - GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo + GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo // GetServerCredentials returns the transport credentials configured on a // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. - GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials + GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials // CanonicalString returns the canonical string of the code defined here: // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. 
- CanonicalString interface{} // func (codes.Code) string + CanonicalString any // func (codes.Code) string // DrainServerTransports initiates a graceful close of existing connections // on a gRPC server accepted on the provided listener address. An // xDS-enabled server invokes this method on a grpc.Server when a particular // listener moves to "not-serving" mode. - DrainServerTransports interface{} // func(*grpc.Server, string) + DrainServerTransports any // func(*grpc.Server, string) // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - AddGlobalServerOptions interface{} // func(opt ...ServerOption) + AddGlobalServerOptions any // func(opt ...ServerOption) // ClearGlobalServerOptions clears the array of extra ServerOption. This // method is useful in testing and benchmarking. // @@ -88,14 +92,14 @@ var ( // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - AddGlobalDialOptions interface{} // func(opt ...DialOption) + AddGlobalDialOptions any // func(opt ...DialOption) // DisableGlobalDialOptions returns a DialOption that prevents the // ClientConn from applying the global DialOptions (set via // AddGlobalDialOptions). // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - DisableGlobalDialOptions interface{} // func() grpc.DialOption + DisableGlobalDialOptions any // func() grpc.DialOption // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. // @@ -104,23 +108,26 @@ var ( ClearGlobalDialOptions func() // JoinDialOptions combines the dial options passed as arguments into a // single dial option. - JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption + JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption // JoinServerOptions combines the server options passed as arguments into a // single server option. - JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption + JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption // WithBinaryLogger returns a DialOption that specifies the binary logger // for a ClientConn. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption + WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption // BinaryLogger returns a ServerOption that can set the binary logger for a // server. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption + BinaryLogger any // func(binarylog.Logger) grpc.ServerOption + + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn + SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using // the provided xds bootstrap config instead of the global configuration from @@ -131,7 +138,7 @@ var ( // // This function should ONLY be used for testing and may not work with some // other features, including the CSDS service. 
- NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) + NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster // Specifier Plugin for testing purposes, regardless of the XDSRLS environment @@ -163,7 +170,17 @@ var ( UnregisterRBACHTTPFilterForTesting func() // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. - ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions) + ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) + + // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra + // metadata to RPCs. + GRPCResolverSchemeExtraMetadata string = "xds" + + // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. + EnterIdleModeForTesting any // func(*grpc.ClientConn) error + + // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. + ExitIdleModeForTesting any // func(*grpc.ClientConn) error ) // HealthChecker defines the signature of the client-side LB channel health checking function. @@ -174,7 +191,7 @@ var ( // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md -type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error +type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error const ( // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go index c82e608e0..900bfb716 100644 --- a/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -35,7 +35,7 @@ const mdKey = mdKeyType("grpc.internal.address.metadata") type mdValue metadata.MD -func (m mdValue) Equal(o interface{}) bool { +func (m mdValue) Equal(o any) bool { om, ok := o.(mdValue) if !ok { return false diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go index 0177af4b5..703319137 100644 --- a/vendor/google.golang.org/grpc/internal/pretty/pretty.go +++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -35,7 +35,7 @@ const jsonIndent = " " // ToJSON marshals the input into a json string. // // If marshal fails, it falls back to fmt.Sprintf("%+v"). -func ToJSON(e interface{}) string { +func ToJSON(e any) string { switch ee := e.(type) { case protov1.Message: mm := jsonpb.Marshaler{Indent: jsonIndent} diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go index c7a18a948..f0603871c 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -92,7 +92,7 @@ type ClientStream interface { // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. 
It returns io.EOF when the stream completes successfully. On // any other error, the stream is aborted and the error contains the RPC @@ -101,7 +101,7 @@ type ClientStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // ClientInterceptor is an interceptor for gRPC client streams. diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index b0ead4f54..03ef2fedd 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -43,13 +43,41 @@ type Status struct { s *spb.Status } +// NewWithProto returns a new status including details from statusProto. This +// is meant to be used by the gRPC library only. +func NewWithProto(code codes.Code, message string, statusProto []string) *Status { + if len(statusProto) != 1 { + // No grpc-status-details bin header, or multiple; just ignore. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + st := &spb.Status{} + if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil { + // Probably not a google.rpc.Status proto; do not provide details. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + if st.Code == int32(code) { + // The codes match between the grpc-status header and the + // grpc-status-details-bin header; use the full details proto. + return &Status{s: st} + } + return &Status{ + s: &spb.Status{ + Code: int32(codes.Internal), + Message: fmt.Sprintf( + "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v", + code, message, st, + ), + }, + } +} + // New returns a Status representing c and msg. func New(c codes.Code, msg string) *Status { return &Status{s: &spb.Status{Code: int32(c), Message: msg}} } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { +func Newf(c codes.Code, format string, a ...any) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -64,7 +92,7 @@ func Err(c codes.Code, msg string) error { } // Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return Err(c, fmt.Sprintf(format, a...)) } @@ -120,11 +148,11 @@ func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { // Details returns a slice of details messages attached to the status. // If a detail cannot be decoded, the error is returned in place of the detail. 
-func (s *Status) Details() []interface{} { +func (s *Status) Details() []any { if s == nil || s.s == nil { return nil } - details := make([]interface{}, 0, len(s.s.Details)) + details := make([]any, 0, len(s.s.Details)) for _, any := range s.s.Details { detail := &ptypes.DynamicAny{} if err := ptypes.UnmarshalAny(any, detail); err != nil { diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index be5a9c81e..b330ccedc 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -40,7 +40,7 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { } type itemNode struct { - it interface{} + it any next *itemNode } @@ -49,7 +49,7 @@ type itemList struct { tail *itemNode } -func (il *itemList) enqueue(i interface{}) { +func (il *itemList) enqueue(i any) { n := &itemNode{it: i} if il.tail == nil { il.head, il.tail = n, n @@ -61,11 +61,11 @@ func (il *itemList) enqueue(i interface{}) { // peek returns the first item in the list without removing it from the // list. -func (il *itemList) peek() interface{} { +func (il *itemList) peek() any { return il.head.it } -func (il *itemList) dequeue() interface{} { +func (il *itemList) dequeue() any { if il.head == nil { return nil } @@ -336,7 +336,7 @@ func (c *controlBuffer) put(it cbItem) error { return err } -func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { +func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) { var wakeUp bool c.mu.Lock() if c.err != nil { @@ -373,7 +373,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b } // Note argument f should never be nil. -func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { +func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { c.mu.Lock() if c.err != nil { c.mu.Unlock() @@ -387,7 +387,7 @@ func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bo return true, nil } -func (c *controlBuffer) get(block bool) (interface{}, error) { +func (c *controlBuffer) get(block bool) (any, error) { for { c.mu.Lock() if c.err != nil { @@ -830,7 +830,7 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } -func (l *loopyWriter) handle(i interface{}) error { +func (l *loopyWriter) handle(i any) error { switch i := i.(type) { case *incomingWindowUpdate: l.incomingWindowUpdateHandler(i) diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 98f80e3fa..17f7a21b5 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -220,18 +220,20 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro h.Set("Grpc-Message", encodeGrpcMessage(m)) } + s.hdrMu.Lock() if p := st.Proto(); p != nil && len(p.Details) > 0 { + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. 
panic(err) } - h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes)) } - if md := s.Trailer(); len(md) > 0 { - for k, vv := range md { + if len(s.trailer) > 0 { + for k, vv := range s.trailer { // Clients don't tolerate reading restricted headers after some non restricted ones were sent. if isReservedHeader(k) { continue @@ -243,6 +245,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } + s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -287,7 +290,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { } // writeCustomHeaders sets custom headers set on the stream via SetHeader -// on the first write call (Write, WriteHeader, or WriteStatus). +// on the first write call (Write, WriteHeader, or WriteStatus) func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { h := ht.rw.Header() @@ -344,7 +347,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { // With this transport type there will be exactly 1 stream: this HTTP request. ctx := ht.req.Context() diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 326bf0848..d6f5c4935 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -330,7 +330,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), - framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), + framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, activeStreams: make(map[uint32]*Stream), @@ -762,7 +762,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, firstTry := true var ch chan struct{} transportDrainRequired := false - checkForStreamQuota := func(it interface{}) bool { + checkForStreamQuota := func(it any) bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { t.waitingStreams++ @@ -800,7 +800,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } var hdrListSizeErr error - checkForHeaderListSize := func(it interface{}) bool { + checkForHeaderListSize := func(it any) bool { if t.maxSendHeaderListSize == nil { return true } @@ -815,7 +815,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } for { - success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { + success, err := t.controlBuf.executeAndPut(func(it any) bool { return checkForHeaderListSize(it) && checkForStreamQuota(it) }, hdr) if err != nil { @@ -927,7 +927,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. 
rst: rst, rstCode: rstCode, } - addBackStreamQuota := func(interface{}) bool { + addBackStreamQuota := func(any) bool { t.streamQuota++ if t.streamQuota > 0 && t.waitingStreams > 0 { select { @@ -1080,7 +1080,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. func (t *http2Client) updateFlowControl(n uint32) { - updateIWS := func(interface{}) bool { + updateIWS := func(any) bool { t.initialWindowSize = int32(n) t.mu.Lock() for _, s := range t.activeStreams { @@ -1233,7 +1233,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { } updateFuncs = append(updateFuncs, updateStreamQuota) } - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func(any) bool { for _, f := range updateFuncs { f() } @@ -1399,7 +1399,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { mdata = make(map[string][]string) contentTypeErr = "malformed header: missing HTTP content-type" grpcMessage string - statusGen *status.Status recvCompress string httpStatusCode *int httpStatusErr string @@ -1434,12 +1433,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { rawStatusCode = codes.Code(uint32(code)) case "grpc-message": grpcMessage = decodeGrpcMessage(hf.Value) - case "grpc-status-details-bin": - var err error - statusGen, err = decodeGRPCStatusDetails(hf.Value) - if err != nil { - headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) - } case ":status": if hf.Value == "200" { httpStatusErr = "" @@ -1505,14 +1498,15 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - isHeader := false - - // If headerChan hasn't been closed yet - if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { - s.headerValid = true - if !endStream { - // HEADERS frame block carries a Response-Headers. - isHeader = true + // For headers, set them in s.header and close headerChan. For trailers or + // trailers-only, closeStream will set the trailers and close headerChan as + // needed. + if !endStream { + // If headerChan hasn't been closed yet (expected, given we checked it + // above, but something else could have potentially closed the whole + // stream). + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. @@ -1520,15 +1514,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if len(mdata) > 0 { s.header = mdata } - } else { - // HEADERS frame block carries a Trailers-Only. - s.noHeaders = true + close(s.headerChan) } - close(s.headerChan) } for _, sh := range t.statsHandlers { - if isHeader { + if !endStream { inHeader := &stats.InHeader{ Client: true, WireLength: int(frame.Header().Length), @@ -1550,13 +1541,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - if statusGen == nil { - statusGen = status.New(rawStatusCode, grpcMessage) - } + status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) - // if client received END_STREAM from server while stream was still active, send RST_STREAM - rst := s.getState() == streamActive - t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) + // If client received END_STREAM from server while stream was still active, + // send RST_STREAM. 
+ rstStream := s.getState() == streamActive + t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true) } // readServerPreface reads and handles the initial settings frame from the diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index f96064012..6fa1eb419 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -165,21 +165,16 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if config.MaxHeaderListSize != nil { maxHeaderListSize = *config.MaxHeaderListSize } - framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) + framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) // Send initial settings as connection preface to client. isettings := []http2.Setting{{ ID: http2.SettingMaxFrameSize, Val: http2MaxFrameLen, }} - // TODO(zhaoq): Have a better way to signal "no limit" because 0 is - // permitted in the HTTP2 spec. - maxStreams := config.MaxStreams - if maxStreams == 0 { - maxStreams = math.MaxUint32 - } else { + if config.MaxStreams != math.MaxUint32 { isettings = append(isettings, http2.Setting{ ID: http2.SettingMaxConcurrentStreams, - Val: maxStreams, + Val: config.MaxStreams, }) } dynamicWindow := true @@ -258,7 +253,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, framer: framer, readerDone: make(chan struct{}), writerDone: make(chan struct{}), - maxStreams: maxStreams, + maxStreams: config.MaxStreams, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, state: reachable, @@ -347,7 +342,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -566,7 +561,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if t.inTapHandle != nil { var err error - if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil { t.mu.Unlock() if t.logger.V(logLevel) { t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) @@ -597,7 +592,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) } - s.ctx = traceCtx(s.ctx, s.method) for _, sh := range t.stats { s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ @@ -635,7 +629,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. 
-func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { +func (t *http2Server) HandleStreams(handle func(*Stream)) { defer close(t.readerDone) for { t.controlBuf.throttle() @@ -670,7 +664,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + if err := t.operateHeaders(frame, handle); err != nil { t.Close(err) break } @@ -855,7 +849,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { } return nil }) - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func(any) bool { for _, f := range updateFuncs { f() } @@ -939,7 +933,7 @@ func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) return headerFields } -func (t *http2Server) checkForHeaderListSize(it interface{}) bool { +func (t *http2Server) checkForHeaderListSize(it any) bool { if t.maxSendHeaderListSize == nil { return true } @@ -1058,12 +1052,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) if p := st.Proto(); p != nil && len(p.Details) > 0 { + // Do not use the user's grpc-status-details-bin (if present) if we are + // even attempting to set our own. + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) } else { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)}) } } diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 19cbb18f5..dc29d590e 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -30,15 +30,13 @@ import ( "net/url" "strconv" "strings" + "sync" "time" "unicode/utf8" - "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) const ( @@ -87,6 +85,8 @@ var ( } ) +var grpcStatusDetailsBinHeader = "grpc-status-details-bin" + // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. @@ -102,7 +102,6 @@ func isReservedHeader(hdr string) bool { "grpc-message", "grpc-status", "grpc-timeout", - "grpc-status-details-bin", // Intentionally exclude grpc-previous-rpc-attempts and // grpc-retry-pushback-ms, which are "reserved", but their API // intentionally works via metadata. 
@@ -153,18 +152,6 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { - v, err := decodeBinHeader(rawDetails) - if err != nil { - return nil, err - } - st := &spb.Status{} - if err = proto.Unmarshal(v, st); err != nil { - return nil, err - } - return status.FromProto(st), nil -} - type timeoutUnit uint8 const ( @@ -309,6 +296,7 @@ func decodeGrpcMessageUnchecked(msg string) string { } type bufWriter struct { + pool *sync.Pool buf []byte offset int batchSize int @@ -316,12 +304,17 @@ type bufWriter struct { err error } -func newBufWriter(conn net.Conn, batchSize int) *bufWriter { - return &bufWriter{ - buf: make([]byte, batchSize*2), +func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { + w := &bufWriter{ batchSize: batchSize, conn: conn, + pool: pool, } + // this indicates that we should use non shared buf + if pool == nil { + w.buf = make([]byte, batchSize) + } + return w } func (w *bufWriter) Write(b []byte) (n int, err error) { @@ -332,19 +325,34 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { n, err = w.conn.Write(b) return n, toIOError(err) } + if w.buf == nil { + b := w.pool.Get().(*[]byte) + w.buf = *b + } for len(b) > 0 { nn := copy(w.buf[w.offset:], b) b = b[nn:] w.offset += nn n += nn if w.offset >= w.batchSize { - err = w.Flush() + err = w.flushKeepBuffer() } } return n, err } func (w *bufWriter) Flush() error { + err := w.flushKeepBuffer() + // Only release the buffer if we are in a "shared" mode + if w.buf != nil && w.pool != nil { + b := w.buf + w.pool.Put(&b) + w.buf = nil + } + return err +} + +func (w *bufWriter) flushKeepBuffer() error { if w.err != nil { return w.err } @@ -381,7 +389,10 @@ type framer struct { fr *http2.Framer } -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { +var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var writeBufferMutex sync.Mutex + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { if writeBufferSize < 0 { writeBufferSize = 0 } @@ -389,7 +400,11 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList if readBufferSize > 0 { r = bufio.NewReaderSize(r, readBufferSize) } - w := newBufWriter(conn, writeBufferSize) + var pool *sync.Pool + if sharedWriteBuffer { + pool = getWriteBufferPool(writeBufferSize) + } + w := newBufWriter(conn, writeBufferSize, pool) f := &framer{ writer: w, fr: http2.NewFramer(w, r), @@ -403,6 +418,24 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList return f } +func getWriteBufferPool(writeBufferSize int) *sync.Pool { + writeBufferMutex.Lock() + defer writeBufferMutex.Unlock() + size := writeBufferSize * 2 + pool, ok := writeBufferPoolMap[size] + if ok { + return pool + } + pool = &sync.Pool{ + New: func() any { + b := make([]byte, size) + return &b + }, + } + writeBufferPoolMap[size] = pool + return pool +} + // parseDialTarget returns the network and address to pass to dialer. 
func parseDialTarget(target string) (string, string) { net := "tcp" diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index aa1c89659..aac056e72 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -43,10 +43,6 @@ import ( "google.golang.org/grpc/tap" ) -// ErrNoHeaders is used as a signal that a trailers only response was received, -// and is not a real error. -var ErrNoHeaders = errors.New("stream has no headers") - const logLevel = 2 type bufferPool struct { @@ -56,7 +52,7 @@ type bufferPool struct { func newBufferPool() *bufferPool { return &bufferPool{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { return new(bytes.Buffer) }, }, @@ -390,14 +386,10 @@ func (s *Stream) Header() (metadata.MD, error) { } s.waitOnHeader() - if !s.headerValid { + if !s.headerValid || s.noHeaders { return nil, s.status.Err() } - if s.noHeaders { - return nil, ErrNoHeaders - } - return s.header.Copy(), nil } @@ -559,6 +551,7 @@ type ServerConfig struct { InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int + SharedWriteBuffer bool ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 @@ -592,6 +585,8 @@ type ConnectOptions struct { WriteBufferSize int // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int + // SharedWriteBuffer indicates whether connections should reuse write buffer + SharedWriteBuffer bool // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. @@ -703,7 +698,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(func(*Stream), func(context.Context, string) context.Context) + HandleStreams(func(*Stream)) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. @@ -736,7 +731,7 @@ type ServerTransport interface { } // connectionErrorf creates an ConnectionError with the specified error description. -func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { +func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError { return ConnectionError{ Desc: fmt.Sprintf(format, a...), temp: temp, diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 02f975951..236837f41 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -28,21 +28,26 @@ import ( "google.golang.org/grpc/internal/channelz" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick // actions and unblock when there's a picker update. 
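With ErrNoHeaders removed above, a trailers-only response now reaches callers of Header as a nil metadata map with a nil error, and the RPC status is obtained from the next RecvMsg call. A hedged client-side sketch of handling that case (logHeader is an illustrative helper, not part of the patch):

package clientutil

import (
	"log"

	"google.golang.org/grpc"
)

// logHeader inspects the header metadata of an in-flight client stream.
func logHeader(cs grpc.ClientStream) {
	md, err := cs.Header()
	if err != nil {
		log.Printf("header error: %v", err)
		return
	}
	if md == nil {
		// Trailers-only response: the server sent no headers; the RPC status
		// will be surfaced by the next RecvMsg call.
		log.Print("trailers-only response, no headers")
		return
	}
	log.Printf("headers: %v", md)
}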
type pickerWrapper struct { - mu sync.Mutex - done bool - idle bool - blockingCh chan struct{} - picker balancer.Picker + mu sync.Mutex + done bool + idle bool + blockingCh chan struct{} + picker balancer.Picker + statsHandlers []stats.Handler // to record blocking picker calls } -func newPickerWrapper() *pickerWrapper { - return &pickerWrapper{blockingCh: make(chan struct{})} +func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { + return &pickerWrapper{ + blockingCh: make(chan struct{}), + statsHandlers: statsHandlers, + } } // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. @@ -95,6 +100,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. var ch chan struct{} var lastPickErr error + for { pw.mu.Lock() if pw.done { @@ -129,6 +135,20 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. continue } + // If the channel is set, it means that the pick call had to wait for a + // new picker at some point. Either it's the first iteration and this + // function received the first picker, or a picker errored with + // ErrNoSubConnAvailable or errored with failfast set to false, which + // will trigger a continue to the next iteration. In the first case this + // conditional will hit if this call had to block (the channel is set). + // In the second case, the only way it will get to this conditional is + // if there is a new picker. + if ch != nil { + for _, sh := range pw.statsHandlers { + sh.HandleRPC(ctx, &stats.PickerUpdated{}) + } + } + ch = pw.blockingCh p := pw.picker pw.mu.Unlock() diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index abe266b02..2e9cf66b4 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -26,12 +26,18 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/envconfig" + internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) -// PickFirstBalancerName is the name of the pick_first balancer. -const PickFirstBalancerName = "pick_first" +const ( + // PickFirstBalancerName is the name of the pick_first balancer. + PickFirstBalancerName = "pick_first" + logPrefix = "[pick-first-lb %p] " +) func newPickfirstBuilder() balancer.Builder { return &pickfirstBuilder{} @@ -40,7 +46,9 @@ func newPickfirstBuilder() balancer.Builder { type pickfirstBuilder struct{} func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - return &pickfirstBalancer{cc: cc} + b := &pickfirstBalancer{cc: cc} + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b } func (*pickfirstBuilder) Name() string { @@ -57,23 +65,36 @@ type pfConfig struct { } func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - cfg := &pfConfig{} - if err := json.Unmarshal(js, cfg); err != nil { + if !envconfig.PickFirstLBConfig { + // Prior to supporting loadbalancing configuration, the pick_first LB + // policy did not implement the balancer.ConfigParser interface. This + // meant that if a non-empty configuration was passed to it, the service + // config unmarshaling code would throw a warning log, but would + // continue using the pick_first LB policy. 
The code below ensures the + // same behavior is retained if the env var is not set. + if string(js) != "{}" { + logger.Warningf("Ignoring non-empty balancer configuration %q for the pick_first LB policy", string(js)) + } + return nil, nil + } + + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) } return cfg, nil } type pickfirstBalancer struct { + logger *internalgrpclog.PrefixLogger state connectivity.State cc balancer.ClientConn subConn balancer.SubConn - cfg *pfConfig } func (b *pickfirstBalancer) ResolverError(err error) { - if logger.V(2) { - logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err) + if b.logger.V(2) { + b.logger.Infof("Received error from the name resolver: %v", err) } if b.subConn == nil { b.state = connectivity.TransientFailure @@ -96,35 +117,44 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // The resolver reported an empty address list. Treat it like an error by // calling b.ResolverError. if b.subConn != nil { - // Remove the old subConn. All addresses were removed, so it is no longer - // valid. - b.cc.RemoveSubConn(b.subConn) + // Shut down the old subConn. All addresses were removed, so it is + // no longer valid. + b.subConn.Shutdown() b.subConn = nil } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - if state.BalancerConfig != nil { - cfg, ok := state.BalancerConfig.(*pfConfig) - if !ok { - return fmt.Errorf("pickfirstBalancer: received nil or illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) - } - b.cfg = cfg + // We don't have to guard this block with the env var because ParseConfig + // already does so. + cfg, ok := state.BalancerConfig.(pfConfig) + if state.BalancerConfig != nil && !ok { + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) } - - if envconfig.PickFirstLBConfig && b.cfg != nil && b.cfg.ShuffleAddressList { + if cfg.ShuffleAddressList { + addrs = append([]resolver.Address{}, addrs...) grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) } + + if b.logger.V(2) { + b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) + } + if b.subConn != nil { b.cc.UpdateAddresses(b.subConn, addrs) return nil } - subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + var subConn balancer.SubConn + subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(subConn, state) + }, + }) if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + if b.logger.V(2) { + b.logger.Infof("Failed to create new SubConn: %v", err) } b.state = connectivity.TransientFailure b.cc.UpdateState(balancer.State{ @@ -143,13 +173,19 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState return nil } +// UpdateSubConnState is unused as a StateListener is always registered when +// creating SubConns. 
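The ShuffleAddressList handling above copies the resolver-provided slice before shuffling so the resolver's own state is never mutated. A standalone illustration of that copy-before-shuffle pattern (using math/rand in place of the internal grpcrand package):

package main

import (
	"fmt"
	"math/rand"
)

type address struct{ Addr string }

// shuffled returns a shuffled copy, leaving the caller's slice untouched.
func shuffled(addrs []address) []address {
	out := append([]address{}, addrs...) // copy first: the input may be shared
	rand.Shuffle(len(out), func(i, j int) { out[i], out[j] = out[j], out[i] })
	return out
}

func main() {
	in := []address{{"a:1"}, {"b:2"}, {"c:3"}}
	fmt.Println(shuffled(in), "original still:", in)
}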
func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) +} + +func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + if b.logger.V(2) { + b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) } if b.subConn != subConn { - if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") + if b.logger.V(2) { + b.logger.Infof("Ignored state change because subConn is not recognized") } return } diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index cd4554785..73bd63364 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -37,7 +37,7 @@ type PreparedMsg struct { } // Encode marshalls and compresses the message using the codec and compressor for the stream. -func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { +func (p *PreparedMsg) Encode(s Stream, msg any) error { ctx := s.Context() rpcInfo, ok := rpcInfoFromContext(ctx) if !ok { diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go new file mode 100644 index 000000000..0a4262342 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go @@ -0,0 +1,119 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package manual defines a resolver that can be used to manually send resolved +// addresses to ClientConn. +package manual + +import ( + "sync" + + "google.golang.org/grpc/resolver" +) + +// NewBuilderWithScheme creates a new manual resolver builder with the given +// scheme. Every instance of the manual resolver may only ever be used with a +// single grpc.ClientConn. Otherwise, bad things will happen. +func NewBuilderWithScheme(scheme string) *Resolver { + return &Resolver{ + BuildCallback: func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) {}, + UpdateStateCallback: func(error) {}, + ResolveNowCallback: func(resolver.ResolveNowOptions) {}, + CloseCallback: func() {}, + scheme: scheme, + } +} + +// Resolver is also a resolver builder. +// It's build() function always returns itself. +type Resolver struct { + // BuildCallback is called when the Build method is called. Must not be + // nil. Must not be changed after the resolver may be built. + BuildCallback func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) + // UpdateStateCallback is called when the UpdateState method is called on + // the resolver. The value passed as argument to this callback is the value + // returned by the resolver.ClientConn. Must not be nil. Must not be + // changed after the resolver may be built. 
+ UpdateStateCallback func(err error) + // ResolveNowCallback is called when the ResolveNow method is called on the + // resolver. Must not be nil. Must not be changed after the resolver may + // be built. + ResolveNowCallback func(resolver.ResolveNowOptions) + // CloseCallback is called when the Close method is called. Must not be + // nil. Must not be changed after the resolver may be built. + CloseCallback func() + scheme string + + // Fields actually belong to the resolver. + // Guards access to below fields. + mu sync.Mutex + CC resolver.ClientConn + // Storing the most recent state update makes this resolver resilient to + // restarts, which is possible with channel idleness. + lastSeenState *resolver.State +} + +// InitialState adds initial state to the resolver so that UpdateState doesn't +// need to be explicitly called after Dial. +func (r *Resolver) InitialState(s resolver.State) { + r.lastSeenState = &s +} + +// Build returns itself for Resolver, because it's both a builder and a resolver. +func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + r.BuildCallback(target, cc, opts) + r.mu.Lock() + r.CC = cc + if r.lastSeenState != nil { + err := r.CC.UpdateState(*r.lastSeenState) + go r.UpdateStateCallback(err) + } + r.mu.Unlock() + return r, nil +} + +// Scheme returns the manual resolver's scheme. +func (r *Resolver) Scheme() string { + return r.scheme +} + +// ResolveNow is a noop for Resolver. +func (r *Resolver) ResolveNow(o resolver.ResolveNowOptions) { + r.ResolveNowCallback(o) +} + +// Close is a noop for Resolver. +func (r *Resolver) Close() { + r.CloseCallback() +} + +// UpdateState calls CC.UpdateState. +func (r *Resolver) UpdateState(s resolver.State) { + r.mu.Lock() + err := r.CC.UpdateState(s) + r.lastSeenState = &s + r.mu.Unlock() + r.UpdateStateCallback(err) +} + +// ReportError calls CC.ReportError. +func (r *Resolver) ReportError(err error) { + r.mu.Lock() + r.CC.ReportError(err) + r.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go index efcb7f3ef..804be887d 100644 --- a/vendor/google.golang.org/grpc/resolver/map.go +++ b/vendor/google.golang.org/grpc/resolver/map.go @@ -20,7 +20,7 @@ package resolver type addressMapEntry struct { addr Address - value interface{} + value any } // AddressMap is a map of addresses to arbitrary values taking into account @@ -69,7 +69,7 @@ func (l addressMapEntryList) find(addr Address) int { } // Get returns the value for the address in the map, if present. -func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { +func (a *AddressMap) Get(addr Address) (value any, ok bool) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { @@ -79,7 +79,7 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { } // Set updates or adds the value to the address in the map. -func (a *AddressMap) Set(addr Address, value interface{}) { +func (a *AddressMap) Set(addr Address, value any) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { @@ -127,8 +127,8 @@ func (a *AddressMap) Keys() []Address { } // Values returns a slice of all current map values. 
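The new vendored file resolver/manual/manual.go above is the public manual resolver. A hedged usage sketch follows; the scheme, target, and backend addresses are placeholders.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
)

func main() {
	r := manual.NewBuilderWithScheme("whatever")
	// Seed the resolver so Dial does not have to wait for a later UpdateState.
	r.InitialState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:50051"}}})

	conn, err := grpc.Dial(
		r.Scheme()+":///ignored",
		grpc.WithResolvers(r),
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Later, new addresses can be pushed without re-dialing.
	r.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:50052"}}})
}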
-func (a *AddressMap) Values() []interface{} { - ret := make([]interface{}, 0, a.Len()) +func (a *AddressMap) Values() []any { + ret := make([]any, 0, a.Len()) for _, entryList := range a.m { for _, entry := range entryList { ret = append(ret, entry.value) diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index d8db6f5d3..11384e228 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -77,25 +77,6 @@ func GetDefaultScheme() string { return defaultScheme } -// AddressType indicates the address type returned by name resolution. -// -// Deprecated: use Attributes in Address instead. -type AddressType uint8 - -const ( - // Backend indicates the address is for a backend server. - // - // Deprecated: use Attributes in Address instead. - Backend AddressType = iota - // GRPCLB indicates the address is for a grpclb load balancer. - // - // Deprecated: to select the GRPCLB load balancing policy, use a service - // config with a corresponding loadBalancingConfig. To supply balancer - // addresses to the GRPCLB load balancing policy, set State.Attributes - // using balancer/grpclb/state.Set. - GRPCLB -) - // Address represents a server the client connects to. // // # Experimental @@ -111,9 +92,6 @@ type Address struct { // the address, instead of the hostname from the Dial target string. In most cases, // this should not be set. // - // If Type is GRPCLB, ServerName should be the name of the remote load - // balancer, not the name of the backend. - // // WARNING: ServerName must only be populated with trusted values. It // is insecure to populate it with data from untrusted inputs since untrusted // values could be used to bypass the authority checks performed by TLS. @@ -126,18 +104,16 @@ type Address struct { // BalancerAttributes contains arbitrary data about this address intended // for consumption by the LB policy. These attributes do not affect SubConn // creation, connection establishment, handshaking, etc. - BalancerAttributes *attributes.Attributes - - // Type is the type of this address. // - // Deprecated: use Attributes instead. - Type AddressType + // Deprecated: when an Address is inside an Endpoint, this field should not + // be used, and it will eventually be removed entirely. + BalancerAttributes *attributes.Attributes // Metadata is the information associated with Addr, which may be used // to make load balancing decision. // // Deprecated: use Attributes instead. - Metadata interface{} + Metadata any } // Equal returns whether a and o are identical. Metadata is compared directly, @@ -150,7 +126,7 @@ func (a Address) Equal(o Address) bool { return a.Addr == o.Addr && a.ServerName == o.ServerName && a.Attributes.Equal(o.Attributes) && a.BalancerAttributes.Equal(o.BalancerAttributes) && - a.Type == o.Type && a.Metadata == o.Metadata + a.Metadata == o.Metadata } // String returns JSON formatted string representation of the address. @@ -194,11 +170,37 @@ type BuildOptions struct { Dialer func(context.Context, string) (net.Conn, error) } +// An Endpoint is one network endpoint, or server, which may have multiple +// addresses with which it can be accessed. +type Endpoint struct { + // Addresses contains a list of addresses used to access this endpoint. + Addresses []Address + + // Attributes contains arbitrary data about this endpoint intended for + // consumption by the LB policy. 
+ Attributes *attributes.Attributes +} + // State contains the current Resolver state relevant to the ClientConn. type State struct { // Addresses is the latest set of resolved addresses for the target. + // + // If a resolver sets Addresses but does not set Endpoints, one Endpoint + // will be created for each Address before the State is passed to the LB + // policy. The BalancerAttributes of each entry in Addresses will be set + // in Endpoints.Attributes, and be cleared in the Endpoint's Address's + // BalancerAttributes. + // + // Soon, Addresses will be deprecated and replaced fully by Endpoints. Addresses []Address + // Endpoints is the latest set of resolved endpoints for the target. + // + // If a resolver produces a State containing Endpoints but not Addresses, + // it must take care to ensure the LB policies it selects will support + // Endpoints. + Endpoints []Endpoint + // ServiceConfig contains the result from parsing the latest service // config. If it is nil, it indicates no service config is present or the // resolver does not provide service configs. @@ -258,15 +260,6 @@ type ClientConn interface { // target does not contain a scheme or if the parsed scheme is not registered // (i.e. no corresponding resolver available to resolve the endpoint), we will // apply the default scheme, and will attempt to reparse it. -// -// Examples: -// -// - "dns://some_authority/foo.bar" -// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} -// - "foo.bar" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} -// - "unknown_scheme://authority/endpoint" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { // URL contains the parsed dial target with an optional default scheme added // to it if the original dial target contained no scheme or contained an @@ -321,10 +314,3 @@ type Resolver interface { // Close closes the resolver. Close() } - -// UnregisterForTesting removes the resolver builder with the given scheme from the -// resolver map. -// This function is for testing only. -func UnregisterForTesting(scheme string) { - delete(m, scheme) -} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index b408b3688..d68330560 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -133,7 +133,7 @@ func (ccr *ccResolverWrapper) close() { ccr.mu.Unlock() // Give enqueued callbacks a chance to finish. - <-ccr.serializer.Done + <-ccr.serializer.Done() // Spawn a goroutine to close the resolver (since it may block trying to // cleanup all allocated resources) and return early. @@ -152,6 +152,14 @@ func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) // which includes addresses and service config. 
func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { errCh := make(chan error, 1) + if s.Endpoints == nil { + s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) + for _, a := range s.Addresses { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + s.Endpoints = append(s.Endpoints, ep) + } + } ok := ccr.serializer.Schedule(func(context.Context) { ccr.addChannelzTraceEvent(s) ccr.curState = s diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index a844d28f4..b7723aa09 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -75,7 +75,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) { } return &gzipCompressor{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) @@ -626,7 +626,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. -func encode(c baseCodec, msg interface{}) ([]byte, error) { +func encode(c baseCodec, msg any) ([]byte, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -693,7 +693,7 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { return hdr, data } -func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { return &stats.OutPayload{ Client: client, Payload: msg, @@ -792,7 +792,7 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) if err != nil { return err @@ -863,19 +863,22 @@ func ErrorDesc(err error) string { // Errorf returns nil if c is OK. // // Deprecated: use status.Errorf instead. -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return status.Errorf(c, format, a...) } +var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error()) +var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) + // toRPCErr converts an error into an error from the status package. 
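The UpdateState change above back-fills State.Endpoints when a resolver only sets Addresses, moving each address's BalancerAttributes onto its synthetic single-address Endpoint. A standalone sketch of that transformation (fillEndpoints is our name for it):

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

// fillEndpoints creates one Endpoint per Address when the resolver did not
// populate Endpoints itself, mirroring the back-fill shown above.
func fillEndpoints(s resolver.State) resolver.State {
	if s.Endpoints != nil {
		return s
	}
	s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses))
	for _, a := range s.Addresses {
		ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
		ep.Addresses[0].BalancerAttributes = nil // attributes now live on the Endpoint
		s.Endpoints = append(s.Endpoints, ep)
	}
	return s
}

func main() {
	s := fillEndpoints(resolver.State{Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"}}})
	fmt.Println(len(s.Endpoints), "endpoints")
}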
func toRPCErr(err error) error { switch err { case nil, io.EOF: return err case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) + return errContextDeadline case context.Canceled: - return status.Error(codes.Canceled, err.Error()) + return errContextCanceled case io.ErrUnexpectedEOF: return status.Error(codes.Internal, err.Error()) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index e076ec714..8f60d4214 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -86,7 +86,7 @@ func init() { var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") -type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) +type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { @@ -99,26 +99,20 @@ type ServiceDesc struct { ServiceName string // The pointer to the service interface. Used to check whether the user // provided implementation satisfies the interface requirements. - HandlerType interface{} + HandlerType any Methods []MethodDesc Streams []StreamDesc - Metadata interface{} + Metadata any } // serviceInfo wraps information about a service. It is very similar to // ServiceDesc and is constructed from it for internal purposes. type serviceInfo struct { // Contains the implementation for the methods in this service. - serviceImpl interface{} + serviceImpl any methods map[string]*MethodDesc streams map[string]*StreamDesc - mdata interface{} -} - -type serverWorkerData struct { - st transport.ServerTransport - wg *sync.WaitGroup - stream *transport.Stream + mdata any } // Server is a gRPC server to serve RPC requests. @@ -145,7 +139,7 @@ type Server struct { channelzID *channelz.Identifier czData *channelzData - serverWorkerChannel chan *serverWorkerData + serverWorkerChannel chan func() } type serverOptions struct { @@ -170,6 +164,7 @@ type serverOptions struct { initialConnWindowSize int32 writeBufferSize int readBufferSize int + sharedWriteBuffer bool connectionTimeout time.Duration maxHeaderListSize *uint32 headerTableSize *uint32 @@ -178,6 +173,7 @@ type serverOptions struct { } var defaultServerOptions = serverOptions{ + maxConcurrentStreams: math.MaxUint32, maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, maxSendMessageSize: defaultServerMaxSendMessageSize, connectionTimeout: 120 * time.Second, @@ -235,6 +231,20 @@ func newJoinServerOption(opts ...ServerOption) ServerOption { return &joinServerOption{opts: opts} } +// SharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func SharedWriteBuffer(val bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.sharedWriteBuffer = val + }) +} + // WriteBufferSize determines how much data can be batched before doing a write // on the wire. The corresponding memory allocation for this buffer will be // twice the size to keep syscalls low. 
The default value for this buffer is @@ -275,9 +285,9 @@ func InitialConnWindowSize(s int32) ServerOption { // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { - if kp.Time > 0 && kp.Time < time.Second { + if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime { logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") - kp.Time = time.Second + kp.Time = internal.KeepaliveMinServerPingTime } return newFuncServerOption(func(o *serverOptions) { @@ -389,6 +399,9 @@ func MaxSendMsgSize(m int) ServerOption { // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number // of concurrent streams to each ServerTransport. func MaxConcurrentStreams(n uint32) ServerOption { + if n == 0 { + n = math.MaxUint32 + } return newFuncServerOption(func(o *serverOptions) { o.maxConcurrentStreams = n }) @@ -590,24 +603,19 @@ const serverWorkerResetThreshold = 1 << 16 // [1] https://github.com/golang/go/issues/18138 func (s *Server) serverWorker() { for completed := 0; completed < serverWorkerResetThreshold; completed++ { - data, ok := <-s.serverWorkerChannel + f, ok := <-s.serverWorkerChannel if !ok { return } - s.handleSingleStream(data) + f() } go s.serverWorker() } -func (s *Server) handleSingleStream(data *serverWorkerData) { - defer data.wg.Done() - s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream)) -} - // initServerWorkers creates worker goroutines and a channel to process incoming // connections to reduce the time spent overall on runtime.morestack. func (s *Server) initServerWorkers() { - s.serverWorkerChannel = make(chan *serverWorkerData) + s.serverWorkerChannel = make(chan func()) for i := uint32(0); i < s.opts.numServerWorkers; i++ { go s.serverWorker() } @@ -655,7 +663,7 @@ func NewServer(opt ...ServerOption) *Server { // printf records an event in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) printf(format string, a ...interface{}) { +func (s *Server) printf(format string, a ...any) { if s.events != nil { s.events.Printf(format, a...) } @@ -663,7 +671,7 @@ func (s *Server) printf(format string, a ...interface{}) { // errorf records an error in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) errorf(format string, a ...interface{}) { +func (s *Server) errorf(format string, a ...any) { if s.events != nil { s.events.Errorf(format, a...) } @@ -678,14 +686,14 @@ type ServiceRegistrar interface { // once the server has started serving. // desc describes the service and its methods and handlers. impl is the // service implementation which is passed to the method handlers. - RegisterService(desc *ServiceDesc, impl interface{}) + RegisterService(desc *ServiceDesc, impl any) } // RegisterService registers a service and its implementation to the gRPC // server. It is called from the IDL generated code. This must be called before // invoking Serve. If ss is non-nil (for legacy code), its type is checked to // ensure it implements sd.HandlerType. 
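serverWorker above now consumes plain func() closures and still replaces itself after a fixed number of handled items, so goroutine stacks that grew while serving large requests are eventually discarded. A toy version of that self-resetting worker pattern (single worker, no dispatch fallback, simplified shutdown):

package main

import (
	"fmt"
	"sync"
)

const resetThreshold = 1 << 16

var tasks = make(chan func())

// worker runs queued closures and replaces itself after resetThreshold items.
func worker() {
	for completed := 0; completed < resetThreshold; completed++ {
		f, ok := <-tasks
		if !ok {
			return
		}
		f()
	}
	// Spawn a replacement with a fresh (small) stack, then exit.
	go worker()
}

func main() {
	go worker()
	var wg sync.WaitGroup
	wg.Add(1)
	tasks <- func() { defer wg.Done(); fmt.Println("handled on a pooled worker") }
	wg.Wait()
	close(tasks)
}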
-func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { +func (s *Server) RegisterService(sd *ServiceDesc, ss any) { if ss != nil { ht := reflect.TypeOf(sd.HandlerType).Elem() st := reflect.TypeOf(ss) @@ -696,7 +704,7 @@ func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { s.register(sd, ss) } -func (s *Server) register(sd *ServiceDesc, ss interface{}) { +func (s *Server) register(sd *ServiceDesc, ss any) { s.mu.Lock() defer s.mu.Unlock() s.printf("RegisterService(%q)", sd.ServiceName) @@ -737,7 +745,7 @@ type MethodInfo struct { type ServiceInfo struct { Methods []MethodInfo // Metadata is the metadata specified in ServiceDesc when registering service. - Metadata interface{} + Metadata any } // GetServiceInfo returns a map from service names to ServiceInfo. @@ -938,6 +946,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { InitialConnWindowSize: s.opts.initialConnWindowSize, WriteBufferSize: s.opts.writeBufferSize, ReadBufferSize: s.opts.readBufferSize, + SharedWriteBuffer: s.opts.sharedWriteBuffer, ChannelzParentID: s.channelzID, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, @@ -966,27 +975,26 @@ func (s *Server) serveStreams(st transport.ServerTransport) { defer st.Close(errors.New("finished serving streams for the server transport")) var wg sync.WaitGroup + streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) st.HandleStreams(func(stream *transport.Stream) { wg.Add(1) + + streamQuota.acquire() + f := func() { + defer streamQuota.release() + defer wg.Done() + s.handleStream(st, stream) + } + if s.opts.numServerWorkers > 0 { - data := &serverWorkerData{st: st, wg: &wg, stream: stream} select { - case s.serverWorkerChannel <- data: + case s.serverWorkerChannel <- f: return default: // If all stream workers are busy, fallback to the default code path. } } - go func() { - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) - }() - }, func(ctx context.Context, method string) context.Context { - if !EnableTracing { - return ctx - } - tr := trace.New("grpc.Recv."+methodFamily(method), method) - return trace.NewContext(ctx, tr) + go f() }) wg.Wait() } @@ -1035,30 +1043,6 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.serveStreams(st) } -// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. -// If tracing is not enabled, it returns nil. 
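serveStreams above now caps in-flight handlers with a quota derived from MaxConcurrentStreams and passes the new SharedWriteBuffer setting down to the transport. A hedged sketch of a server opting into these knobs (address and sizes are placeholders; SharedWriteBuffer is marked experimental in the hunk above):

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer(
		grpc.WriteBufferSize(64*1024),  // batch writes up to 64 KiB
		grpc.SharedWriteBuffer(true),   // return the buffer to a shared pool after each flush
		grpc.MaxConcurrentStreams(256), // bound concurrently handled streams per transport
	)
	// Service registration omitted.
	log.Fatal(srv.Serve(lis))
}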
-func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { - if !EnableTracing { - return nil - } - tr, ok := trace.FromContext(stream.Context()) - if !ok { - return nil - } - - trInfo = &traceInfo{ - tr: tr, - firstLine: firstLine{ - client: false, - remoteAddr: st.RemoteAddr(), - }, - } - if dl, ok := stream.Context().Deadline(); ok { - trInfo.firstLine.deadline = time.Until(dl) - } - return trInfo -} - func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() @@ -1119,7 +1103,7 @@ func (s *Server) incrCallsFailed() { atomic.AddInt64(&s.czData.callsFailed, 1) } -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) @@ -1138,7 +1122,7 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str err = t.Write(stream, hdr, payload, opts) if err == nil { for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) } } return err @@ -1166,7 +1150,7 @@ func chainUnaryServerInterceptors(s *Server) { } func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) { return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) } } @@ -1175,12 +1159,12 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info if curr == len(interceptors)-1 { return finalHandler } - return func(ctx context.Context, req interface{}) (interface{}, error) { + return func(ctx context.Context, req any) (any, error) { return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) } } -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1194,7 +1178,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. IsClientStream: false, IsServerStream: false, } - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) @@ -1212,7 +1196,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
defer func() { if trInfo != nil { if err != nil && err != io.EOF { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) trInfo.tr.SetError() } trInfo.tr.Finish() @@ -1226,7 +1210,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } if channelz.IsOn() { @@ -1248,7 +1232,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } } if len(binlogs) != 0 { - ctx := stream.Context() md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1329,12 +1312,12 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if channelz.IsOn() { t.IncrMsgRecv() } - df := func(v interface{}) error { + df := func(v any) error { if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } for _, sh := range shs { - sh.HandleRPC(stream.Context(), &stats.InPayload{ + sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, Length: len(d), @@ -1348,7 +1331,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: d, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), cm) + binlog.Log(ctx, cm) } } if trInfo != nil { @@ -1356,7 +1339,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } return nil } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) if appErr != nil { appStatus, ok := status.FromError(appErr) @@ -1381,7 +1364,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Header: h, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) + binlog.Log(ctx, sh) } } st := &binarylog.ServerTrailer{ @@ -1389,7 +1372,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return appErr @@ -1404,7 +1387,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if stream.SendCompress() != sendCompressorName { comp = encoding.GetCompressor(stream.SendCompress()) } - if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err @@ -1431,8 +1414,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), st) + binlog.Log(ctx, sh) + binlog.Log(ctx, st) } } return err @@ -1446,8 +1429,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: reply, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), sm) + binlog.Log(ctx, sh) + binlog.Log(ctx, sm) } } if channelz.IsOn() { @@ -1465,7 +1448,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return t.WriteStatus(stream, statusOK) @@ -1493,7 +1476,7 @@ func chainStreamServerInterceptors(s *Server) { } func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { - return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) } } @@ -1502,12 +1485,12 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf if curr == len(interceptors)-1 { return finalHandler } - return func(srv interface{}, stream ServerStream) error { + return func(srv any, stream ServerStream) error { return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) } } -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1521,10 +1504,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp IsServerStream: sd.ServerStreams, } for _, sh := range shs { - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, t: t, @@ -1543,7 +1526,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if trInfo != nil { ss.mu.Lock() if err != nil && err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } ss.trInfo.tr.Finish() @@ -1560,7 +1543,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp end.Error = toRPCErr(err) } for _, sh := range shs { - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } } @@ -1602,7 +1585,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp logEntry.PeerAddr = peer.Addr } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), logEntry) + binlog.Log(ctx, logEntry) } } @@ -1646,7 +1629,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp trInfo.tr.LazyLog(&trInfo.firstLine, false) } var appErr error - var server interface{} + var server any if info != nil { server = info.serviceImpl } @@ -1680,7 +1663,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } t.WriteStatus(ss.s, appStatus) @@ -1698,33 +1681,50 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return t.WriteStatus(ss.s, statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { +func (s *Server) handleStream(t transport.ServerTransport, stream 
*transport.Stream) { + ctx := stream.Context() + var ti *traceInfo + if EnableTracing { + tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) + ctx = trace.NewContext(ctx, tr) + ti = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: t.RemoteAddr(), + }, + } + if dl, ok := ctx.Deadline(); ok { + ti.firstLine.deadline = time.Until(dl) + } + } + sm := stream.Method() if sm != "" && sm[0] == '/' { sm = sm[1:] } pos := strings.LastIndex(sm, "/") if pos == -1 { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) + ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } return } @@ -1734,17 +1734,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(t, stream, srv, md, trInfo) + s.processUnaryRPC(ctx, t, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(t, stream, srv, sd, trInfo) + s.processStreamingRPC(ctx, t, stream, srv, sd, ti) return } } // Unknown service, or known server unknown method. if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) return } var errDesc string @@ -1753,19 +1753,19 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } else { errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) } - if trInfo != nil { - trInfo.tr.LazyPrintf("%s", errDesc) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyPrintf("%s", errDesc) + ti.tr.SetError() } if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } } @@ -2075,3 +2075,34 @@ func validateSendCompressor(name, clientCompressors string) error { } return fmt.Errorf("client does not support compressor %q", name) } + +// atomicSemaphore implements a blocking, counting semaphore. acquire should be +// called synchronously; release may be called asynchronously. +type atomicSemaphore struct { + n atomic.Int64 + wait chan struct{} +} + +func (q *atomicSemaphore) acquire() { + if q.n.Add(-1) < 0 { + // We ran out of quota. Block until a release happens. + <-q.wait + } +} + +func (q *atomicSemaphore) release() { + // N.B. 
the "<= 0" check below should allow for this to work with multiple + // concurrent calls to acquire, but also note that with synchronous calls to + // acquire, as our system does, n will never be less than -1. There are + // fairness issues (queuing) to consider if this was to be generalized. + if q.n.Add(1) <= 0 { + // An acquire was waiting on us. Unblock it. + q.wait <- struct{}{} + } +} + +func newHandlerQuota(n uint32) *atomicSemaphore { + a := &atomicSemaphore{wait: make(chan struct{}, 1)} + a.n.Store(int64(n)) + return a +} diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go index c3a5a9ac1..48a64cfe8 100644 --- a/vendor/google.golang.org/grpc/shared_buffer_pool.go +++ b/vendor/google.golang.org/grpc/shared_buffer_pool.go @@ -109,7 +109,7 @@ const ( type simpleSharedBufferChildPool interface { Get(size int) []byte - Put(interface{}) + Put(any) } type bufferPool struct { @@ -133,7 +133,7 @@ func (p *bufferPool) Get(size int) []byte { func newBytesPool(size int) simpleSharedBufferChildPool { return &bufferPool{ Pool: sync.Pool{ - New: func() interface{} { + New: func() any { bs := make([]byte, size) return &bs }, diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go index 7a552a9b7..4ab70e2d4 100644 --- a/vendor/google.golang.org/grpc/stats/stats.go +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -59,12 +59,22 @@ func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} +// PickerUpdated indicates that the LB policy provided a new picker while the +// RPC was waiting for one. +type PickerUpdated struct{} + +// IsClient indicates if the stats information is from client side. Only Client +// Side interfaces with a Picker, thus always returns true. +func (*PickerUpdated) IsClient() bool { return true } + +func (*PickerUpdated) isRPCStats() {} + // InPayload contains the information for an incoming payload. type InPayload struct { // Client is true if this InPayload is from client side. Client bool // Payload is the payload with original type. - Payload interface{} + Payload any // Data is the serialized message payload. Data []byte @@ -134,7 +144,7 @@ type OutPayload struct { // Client is true if this OutPayload is from client side. Client bool // Payload is the payload with original type. - Payload interface{} + Payload any // Data is the serialized message payload. Data []byte // Length is the size of the uncompressed payload data. Does not include any diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index bcf2e4d81..a93360efb 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -50,7 +50,7 @@ func New(c codes.Code, msg string) *Status { } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { +func Newf(c codes.Code, format string, a ...any) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -60,7 +60,7 @@ func Error(c codes.Code, msg string) error { } // Errorf returns Error(c, fmt.Sprintf(format, a...)). 
-func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return Error(c, fmt.Sprintf(format, a...)) } @@ -99,25 +99,27 @@ func FromError(err error) (s *Status, ok bool) { } type grpcstatus interface{ GRPCStatus() *Status } if gs, ok := err.(grpcstatus); ok { - if gs.GRPCStatus() == nil { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { // Error has status nil, which maps to codes.OK. There // is no sensible behavior for this, so we turn it into // an error with codes.Unknown and discard the existing // status. return New(codes.Unknown, err.Error()), false } - return gs.GRPCStatus(), true + return grpcStatus, true } var gs grpcstatus if errors.As(err, &gs) { - if gs.GRPCStatus() == nil { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { // Error wraps an error that has status nil, which maps // to codes.OK. There is no sensible behavior for this, // so we turn it into an error with codes.Unknown and // discard the existing status. return New(codes.Unknown, err.Error()), false } - p := gs.GRPCStatus().Proto() + p := grpcStatus.Proto() p.Message = err.Error() return status.FromProto(p), true } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index de32a7597..b14b2fbea 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" @@ -54,7 +55,7 @@ import ( // status package, or be one of the context errors. Otherwise, gRPC will use // codes.Unknown as the status code and err.Error() as the status message of the // RPC. -type StreamHandler func(srv interface{}, stream ServerStream) error +type StreamHandler func(srv any, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. Used // on the server when registering services and on the client when initiating @@ -79,9 +80,9 @@ type Stream interface { // Deprecated: See ClientStream and ServerStream documentation instead. Context() context.Context // Deprecated: See ClientStream and ServerStream documentation instead. - SendMsg(m interface{}) error + SendMsg(m any) error // Deprecated: See ClientStream and ServerStream documentation instead. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // ClientStream defines the client-side behavior of a streaming RPC. @@ -90,7 +91,9 @@ type Stream interface { // status package. type ClientStream interface { // Header returns the header metadata received from the server if there - // is any. It blocks if the metadata is not ready to read. + // is any. It blocks if the metadata is not ready to read. If the metadata + // is nil and the error is also nil, then the stream was terminated without + // headers, and the status can be discovered by calling RecvMsg. Header() (metadata.MD, error) // Trailer returns the trailer metadata from the server, if there is any. // It must only be called after stream.CloseAndRecv has returned, or @@ -126,7 +129,7 @@ type ClientStream interface { // // It is not safe to modify the message after calling SendMsg. Tracing // libraries and stats handlers may use the message lazily. 
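The FromError change above caches gs.GRPCStatus() instead of calling it twice, and the errors.As branch still rewrites the message to the full wrapped error text. A small sketch of the resulting caller-visible behavior when a status error is wrapped:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	base := status.Error(codes.NotFound, "user not found")
	wrapped := fmt.Errorf("loading profile: %w", base)

	st, ok := status.FromError(wrapped)
	// The code survives wrapping; the message becomes the wrapped error text.
	fmt.Println(ok, st.Code(), st.Message())
}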
- SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On // any other error, the stream is aborted and the error contains the RPC @@ -135,7 +138,7 @@ type ClientStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // NewStream creates a new Stream for the client side. This is typically @@ -155,11 +158,6 @@ type ClientStream interface { // If none of the above happen, a goroutine and a context will be leaked, and grpc // will not call the optionally-configured stats handler with a stats.End message. func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - if err := cc.idlenessMgr.onCallBegin(); err != nil { - return nil, err - } - defer cc.idlenessMgr.onCallEnd() - // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -176,6 +174,16 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + // Start tracking the RPC for idleness purposes. This is where a stream is + // created for both streaming and unary RPCs, and hence is a good place to + // track active RPC count. + if err := cc.idlenessMgr.OnCallBegin(); err != nil { + return nil, err + } + // Add a calloption, to decrement the active call count, that gets executed + // when the RPC completes. + opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { // validate md if err := imetadata.Validate(md); err != nil { @@ -433,7 +441,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx = trace.NewContext(ctx, trInfo.tr) } - if cs.cc.parsedTarget.URL.Scheme == "xds" { + if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( @@ -788,23 +796,24 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD - noHeader := false err := cs.withRetry(func(a *csAttempt) error { var err error m, err = a.s.Header() - if err == transport.ErrNoHeaders { - noHeader = true - return nil - } return toRPCErr(err) }, cs.commitAttemptLocked) - if err != nil { - cs.finish(err) - return nil, err + if m == nil && err == nil { + // The stream ended with success. Finish the clientStream. + err = io.EOF } - if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { + if err != nil { + cs.finish(err) + // Do not return the error. The user should get it by calling Recv(). + return nil, nil + } + + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil { // Only log if binary log is on and header has not been logged, and // there is actually headers to log. 
logEntry := &binarylog.ServerHeader{ @@ -820,6 +829,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { binlog.Log(cs.ctx, logEntry) } } + return m, nil } @@ -860,7 +870,7 @@ func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error cs.buffer = append(cs.buffer, op) } -func (cs *clientStream) SendMsg(m interface{}) (err error) { +func (cs *clientStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -904,7 +914,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { return err } -func (cs *clientStream) RecvMsg(m interface{}) error { +func (cs *clientStream) RecvMsg(m any) error { if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Call Header() to binary log header if it's not already logged. cs.Header() @@ -928,24 +938,6 @@ func (cs *clientStream) RecvMsg(m interface{}) error { if err != nil || !cs.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. cs.finish(err) - - if len(cs.binlogs) != 0 { - // finish will not log Trailer. Log Trailer here. - logEntry := &binarylog.ServerTrailer{ - OnClientSide: true, - Trailer: cs.Trailer(), - Err: err, - } - if logEntry.Err == io.EOF { - logEntry.Err = nil - } - if peer, ok := peer.FromContext(cs.Context()); ok { - logEntry.PeerAddr = peer.Addr - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, logEntry) - } - } } return err } @@ -1001,18 +993,30 @@ func (cs *clientStream) finish(err error) { } } } + cs.mu.Unlock() - // For binary logging. only log cancel in finish (could be caused by RPC ctx - // canceled or ClientConn closed). Trailer will be logged in RecvMsg. - // - // Only one of cancel or trailer needs to be logged. In the cases where - // users don't call RecvMsg, users must have already canceled the RPC. - if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { - c := &binarylog.Cancel{ - OnClientSide: true, - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, c) + // Only one of cancel or trailer needs to be logged. 
+ if len(cs.binlogs) != 0 { + switch err { + case errContextCanceled, errContextDeadline, ErrClientConnClosing: + c := &binarylog.Cancel{ + OnClientSide: true, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, c) + } + default: + logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, + Trailer: cs.Trailer(), + Err: err, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } } } if err == nil { @@ -1028,7 +1032,7 @@ func (cs *clientStream) finish(err error) { cs.cancel() } -func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { +func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -1055,7 +1059,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { return nil } -func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { +func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} @@ -1348,7 +1352,7 @@ func (as *addrConnStream) Context() context.Context { return as.s.Context() } -func (as *addrConnStream) SendMsg(m interface{}) (err error) { +func (as *addrConnStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -1393,7 +1397,7 @@ func (as *addrConnStream) SendMsg(m interface{}) (err error) { return nil } -func (as *addrConnStream) RecvMsg(m interface{}) (err error) { +func (as *addrConnStream) RecvMsg(m any) (err error) { defer func() { if err != nil || !as.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. @@ -1512,7 +1516,7 @@ type ServerStream interface { // // It is not safe to modify the message after calling SendMsg. Tracing // libraries and stats handlers may use the message lazily. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the client has performed a CloseSend. On // any non-EOF error, the stream is aborted and the error contains the @@ -1521,7 +1525,7 @@ type ServerStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // serverStream implements a server side Stream. 
@@ -1602,7 +1606,7 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { ss.s.SetTrailer(md) } -func (ss *serverStream) SendMsg(m interface{}) (err error) { +func (ss *serverStream) SendMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1610,7 +1614,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) } else { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1677,7 +1681,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { return nil } -func (ss *serverStream) RecvMsg(m interface{}) (err error) { +func (ss *serverStream) RecvMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1685,7 +1689,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) } else if err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1757,7 +1761,7 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { // prepareMsg returns the hdr, payload and data // using the compressors passed or using the // passed preparedmsg -func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil } diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index bfa5dfa40..07f012576 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -27,6 +27,8 @@ package tap import ( "context" + + "google.golang.org/grpc/metadata" ) // Info defines the relevant information needed by the handles. @@ -34,6 +36,10 @@ type Info struct { // FullMethodName is the string of grpc method (in the format of // /package.service/method). FullMethodName string + + // Header contains the header metadata received. + Header metadata.MD + // TODO: More to be added. } diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go index 07a2d26b3..9ded79321 100644 --- a/vendor/google.golang.org/grpc/trace.go +++ b/vendor/google.golang.org/grpc/trace.go @@ -97,8 +97,8 @@ func truncate(x string, l int) string { // payload represents an RPC request or response payload. type payload struct { - sent bool // whether this is an outgoing payload - msg interface{} // e.g. a proto.Message + sent bool // whether this is an outgoing payload + msg any // e.g. a proto.Message // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? } @@ -111,7 +111,7 @@ func (p payload) String() string { type fmtStringer struct { format string - a []interface{} + a []any } func (f *fmtStringer) String() string { diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 353cfd528..6d2cadd79 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.57.0" +const Version = "1.59.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index a8e4732b3..bb480f1f9 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -84,12 +84,18 @@ not git grep -l 'x/net/context' -- "*.go" # thread safety. git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' +# - Do not use "interface{}"; use "any" instead. +git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc' + # - Do not call grpclog directly. Use grpclog.Component instead. git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' # - Ensure all ptypes proto packages are renamed when importing. not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" +# - Ensure all usages of grpc_testing package are renamed when importing. +not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" + # - Ensure all xds proto imports are renamed to *pb or *grpc. git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' @@ -106,7 +112,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do goimports -l . 2>&1 | not grep -vE "\.pb\.go" golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" - go mod tidy -compat=1.17 + go mod tidy -compat=1.19 git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) popd @@ -168,8 +174,6 @@ proto.RegisteredExtension is deprecated proto.RegisteredExtensions is deprecated proto.RegisterMapType is deprecated proto.Unmarshaler is deprecated -resolver.Backend -resolver.GRPCLB Target is deprecated: Use the Target field in the BuildOptions instead. 
xxx_messageInfo_ ' "${SC_OUT}" diff --git a/vendor/modules.txt b/vendor/modules.txt index 0f014e8a6..dc5946f4f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,26 +1,26 @@ -# cloud.google.com/go v0.110.7 +# cloud.google.com/go v0.110.10 ## explicit; go 1.19 cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -# cloud.google.com/go/compute v1.23.0 +# cloud.google.com/go/compute v1.23.3 ## explicit; go 1.19 cloud.google.com/go/compute/internal # cloud.google.com/go/compute/metadata v0.2.3 ## explicit; go 1.19 cloud.google.com/go/compute/metadata -# cloud.google.com/go/iam v1.1.2 +# cloud.google.com/go/iam v1.1.5 ## explicit; go 1.19 cloud.google.com/go/iam cloud.google.com/go/iam/apiv1/iampb -# cloud.google.com/go/storage v1.32.0 +# cloud.google.com/go/storage v1.34.1 ## explicit; go 1.19 cloud.google.com/go/storage cloud.google.com/go/storage/internal cloud.google.com/go/storage/internal/apiv2 cloud.google.com/go/storage/internal/apiv2/storagepb -# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.1 +# github.com/Azure/azure-sdk-for-go/sdk/azcore v1.8.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azcore github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud @@ -38,10 +38,10 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming github.com/Azure/azure-sdk-for-go/sdk/azcore/to github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing -# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.1 +# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azidentity -# github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 +# github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/internal/diag github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo @@ -50,7 +50,7 @@ github.com/Azure/azure-sdk-for-go/sdk/internal/log github.com/Azure/azure-sdk-for-go/sdk/internal/poller github.com/Azure/azure-sdk-for-go/sdk/internal/temporal github.com/Azure/azure-sdk-for-go/sdk/internal/uuid -# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.1.0 +# github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob @@ -99,7 +99,7 @@ github.com/VictoriaMetrics/fasthttp/stackless # github.com/VictoriaMetrics/metrics v1.24.0 ## explicit; go 1.20 github.com/VictoriaMetrics/metrics -# github.com/VictoriaMetrics/metricsql v0.56.2 +# github.com/VictoriaMetrics/metricsql v0.69.0 => github.com/VictoriaMetrics/metricsql v0.56.2 ## explicit; go 1.13 github.com/VictoriaMetrics/metricsql github.com/VictoriaMetrics/metricsql/binaryop @@ -109,8 +109,8 @@ github.com/VividCortex/ewma # github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 ## explicit; go 1.15 github.com/alecthomas/units -# github.com/aws/aws-sdk-go v1.45.1 -## explicit; go 1.11 +# github.com/aws/aws-sdk-go v1.47.2 +## explicit; go 1.19 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/auth/bearer github.com/aws/aws-sdk-go/aws/awserr @@ -153,8 +153,8 @@ github.com/aws/aws-sdk-go/service/sso/ssoiface github.com/aws/aws-sdk-go/service/ssooidc github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface -# github.com/aws/aws-sdk-go-v2 v1.21.0 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2 v1.22.1 +## explicit; 
go 1.19 github.com/aws/aws-sdk-go-v2 github.com/aws/aws-sdk-go-v2/aws github.com/aws/aws-sdk-go-v2/aws/arn @@ -178,15 +178,15 @@ github.com/aws/aws-sdk-go-v2/internal/shareddefaults github.com/aws/aws-sdk-go-v2/internal/strings github.com/aws/aws-sdk-go-v2/internal/sync/singleflight github.com/aws/aws-sdk-go-v2/internal/timeconv -# github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.13 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.0 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi -# github.com/aws/aws-sdk-go-v2/config v1.18.38 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/config v1.22.0 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/config -# github.com/aws/aws-sdk-go-v2/credentials v1.13.36 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/credentials v1.15.1 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/credentials github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds github.com/aws/aws-sdk-go-v2/credentials/endpointcreds @@ -194,65 +194,65 @@ github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client github.com/aws/aws-sdk-go-v2/credentials/processcreds github.com/aws/aws-sdk-go-v2/credentials/ssocreds github.com/aws/aws-sdk-go-v2/credentials/stscreds -# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.11 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.2 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/feature/ec2/imds github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config -# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.82 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.13.1 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/feature/s3/manager -# github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.41 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.1 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/internal/configsources -# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.35 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.1 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 -# github.com/aws/aws-sdk-go-v2/internal/ini v1.3.42 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/internal/ini v1.5.0 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/internal/ini -# github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.4 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.1 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/internal/v4a github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4 -# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.14 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.0 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding -# github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.36 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.1 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/internal/checksum -# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.35 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.1 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url -# github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.4 
-## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.1 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/internal/s3shared github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config -# github.com/aws/aws-sdk-go-v2/service/s3 v1.38.5 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/service/s3 v1.42.0 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/s3 github.com/aws/aws-sdk-go-v2/service/s3/internal/arn github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints github.com/aws/aws-sdk-go-v2/service/s3/types -# github.com/aws/aws-sdk-go-v2/service/sso v1.13.6 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/service/sso v1.17.0 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/sso github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sso/types -# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.15.5 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.19.0 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/ssooidc github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints github.com/aws/aws-sdk-go-v2/service/ssooidc/types -# github.com/aws/aws-sdk-go-v2/service/sts v1.21.5 -## explicit; go 1.15 +# github.com/aws/aws-sdk-go-v2/service/sts v1.25.0 +## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/sts github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sts/types -# github.com/aws/smithy-go v1.14.2 -## explicit; go 1.15 +# github.com/aws/smithy-go v1.16.0 +## explicit; go 1.19 github.com/aws/smithy-go github.com/aws/smithy-go/auth/bearer github.com/aws/smithy-go/context @@ -284,7 +284,7 @@ github.com/cespare/xxhash/v2 ## explicit; go 1.17 github.com/cheggaaa/pb/v3 github.com/cheggaaa/pb/v3/termutil -# github.com/cpuguy83/go-md2man/v2 v2.0.2 +# github.com/cpuguy83/go-md2man/v2 v2.0.3 ## explicit; go 1.11 github.com/cpuguy83/go-md2man/v2/md2man # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc @@ -306,8 +306,8 @@ github.com/go-kit/log/level # github.com/go-logfmt/logfmt v0.6.0 ## explicit; go 1.17 github.com/go-logfmt/logfmt -# github.com/go-logr/logr v1.2.4 -## explicit; go 1.16 +# github.com/go-logr/logr v1.3.0 +## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/funcr # github.com/go-logr/stdr v1.2.2 @@ -316,6 +316,7 @@ github.com/go-logr/stdr # github.com/gogo/protobuf v1.3.2 ## explicit; go 1.15 github.com/gogo/protobuf/gogoproto +github.com/gogo/protobuf/jsonpb github.com/gogo/protobuf/proto github.com/gogo/protobuf/protoc-gen-gogo/descriptor github.com/gogo/protobuf/sortkeys @@ -337,15 +338,8 @@ github.com/golang/protobuf/ptypes/timestamp # github.com/golang/snappy v0.0.4 ## explicit github.com/golang/snappy -# github.com/google/go-cmp v0.5.9 -## explicit; go 1.13 -github.com/google/go-cmp/cmp -github.com/google/go-cmp/cmp/internal/diff -github.com/google/go-cmp/cmp/internal/flags -github.com/google/go-cmp/cmp/internal/function -github.com/google/go-cmp/cmp/internal/value -# github.com/google/s2a-go v0.1.5 -## explicit; go 1.16 +# github.com/google/s2a-go v0.1.7 +## explicit; go 1.19 github.com/google/s2a-go github.com/google/s2a-go/fallback github.com/google/s2a-go/internal/authinfo @@ -367,10 +361,10 @@ github.com/google/s2a-go/internal/v2/remotesigner github.com/google/s2a-go/internal/v2/tlsconfigstore github.com/google/s2a-go/retry 
github.com/google/s2a-go/stream -# github.com/google/uuid v1.3.1 +# github.com/google/uuid v1.4.0 ## explicit github.com/google/uuid -# github.com/googleapis/enterprise-certificate-proxy v0.2.5 +# github.com/googleapis/enterprise-certificate-proxy v0.3.2 ## explicit; go 1.19 github.com/googleapis/enterprise-certificate-proxy/client github.com/googleapis/enterprise-certificate-proxy/client/util @@ -396,7 +390,10 @@ github.com/jmespath/go-jmespath # github.com/jpillora/backoff v1.0.0 ## explicit; go 1.13 github.com/jpillora/backoff -# github.com/klauspost/compress v1.16.7 +# github.com/json-iterator/go v1.1.12 +## explicit; go 1.12 +github.com/json-iterator/go +# github.com/klauspost/compress v1.17.2 ## explicit; go 1.18 github.com/klauspost/compress github.com/klauspost/compress/flate @@ -416,15 +413,21 @@ github.com/kylelemons/godebug/pretty # github.com/mattn/go-colorable v0.1.13 ## explicit; go 1.15 github.com/mattn/go-colorable -# github.com/mattn/go-isatty v0.0.19 +# github.com/mattn/go-isatty v0.0.20 ## explicit; go 1.15 github.com/mattn/go-isatty # github.com/mattn/go-runewidth v0.0.15 ## explicit; go 1.9 github.com/mattn/go-runewidth -# github.com/matttproud/golang_protobuf_extensions v1.0.4 -## explicit; go 1.9 -github.com/matttproud/golang_protobuf_extensions/pbutil +# github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 +## explicit; go 1.19 +github.com/matttproud/golang_protobuf_extensions/v2/pbutil +# github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd +## explicit +github.com/modern-go/concurrent +# github.com/modern-go/reflect2 v1.0.2 +## explicit; go 1.12 +github.com/modern-go/reflect2 # github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f ## explicit github.com/mwitkow/go-conntrack @@ -440,18 +443,18 @@ github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 ## explicit github.com/pmezard/go-difflib/difflib -# github.com/prometheus/client_golang v1.16.0 -## explicit; go 1.17 +# github.com/prometheus/client_golang v1.17.0 +## explicit; go 1.19 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promauto github.com/prometheus/client_golang/prometheus/testutil github.com/prometheus/client_golang/prometheus/testutil/promlint -# github.com/prometheus/client_model v0.4.0 -## explicit; go 1.18 +# github.com/prometheus/client_model v0.5.0 +## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.44.0 -## explicit; go 1.18 +# github.com/prometheus/common v0.45.0 +## explicit; go 1.20 github.com/prometheus/common/config github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg @@ -460,13 +463,13 @@ github.com/prometheus/common/version # github.com/prometheus/common/sigv4 v0.1.0 ## explicit; go 1.15 github.com/prometheus/common/sigv4 -# github.com/prometheus/procfs v0.11.1 +# github.com/prometheus/procfs v0.12.0 ## explicit; go 1.19 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v0.46.0 -## explicit; go 1.19 +# github.com/prometheus/prometheus v0.47.2 +## explicit; go 1.20 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery github.com/prometheus/prometheus/discovery/targetgroup @@ -484,6 +487,8 @@ github.com/prometheus/prometheus/scrape github.com/prometheus/prometheus/storage 
github.com/prometheus/prometheus/storage/remote github.com/prometheus/prometheus/storage/remote/azuread +github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus +github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite github.com/prometheus/prometheus/tsdb github.com/prometheus/prometheus/tsdb/chunkenc github.com/prometheus/prometheus/tsdb/chunks @@ -559,12 +564,32 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.43.0 +# go.opentelemetry.io/collector/pdata v1.0.0-rcv0017 +## explicit; go 1.20 +go.opentelemetry.io/collector/pdata/internal +go.opentelemetry.io/collector/pdata/internal/data +go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/logs/v1 +go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/metrics/v1 +go.opentelemetry.io/collector/pdata/internal/data/protogen/collector/trace/v1 +go.opentelemetry.io/collector/pdata/internal/data/protogen/common/v1 +go.opentelemetry.io/collector/pdata/internal/data/protogen/logs/v1 +go.opentelemetry.io/collector/pdata/internal/data/protogen/metrics/v1 +go.opentelemetry.io/collector/pdata/internal/data/protogen/resource/v1 +go.opentelemetry.io/collector/pdata/internal/data/protogen/trace/v1 +go.opentelemetry.io/collector/pdata/internal/json +go.opentelemetry.io/collector/pdata/internal/otlp +go.opentelemetry.io/collector/pdata/pcommon +go.opentelemetry.io/collector/pdata/pmetric +go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp +# go.opentelemetry.io/collector/semconv v0.88.0 +## explicit; go 1.20 +go.opentelemetry.io/collector/semconv/v1.6.1 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 ## explicit; go 1.19 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.17.0 -## explicit; go 1.19 +# go.opentelemetry.io/otel v1.19.0 +## explicit; go 1.20 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute go.opentelemetry.io/otel/baggage @@ -575,21 +600,24 @@ go.opentelemetry.io/otel/internal/baggage go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/semconv/v1.17.0 -# go.opentelemetry.io/otel/metric v1.17.0 -## explicit; go 1.19 +# go.opentelemetry.io/otel/metric v1.19.0 +## explicit; go 1.20 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded -# go.opentelemetry.io/otel/trace v1.17.0 -## explicit; go 1.19 +# go.opentelemetry.io/otel/trace v1.19.0 +## explicit; go 1.20 go.opentelemetry.io/otel/trace # go.uber.org/atomic v1.11.0 ## explicit; go 1.18 go.uber.org/atomic -# go.uber.org/goleak v1.2.1 -## explicit; go 1.18 +# go.uber.org/goleak v1.3.0 +## explicit; go 1.20 go.uber.org/goleak go.uber.org/goleak/internal/stack -# golang.org/x/crypto v0.12.0 +# go.uber.org/multierr v1.11.0 +## explicit; go 1.19 +go.uber.org/multierr +# golang.org/x/crypto v0.14.0 ## explicit; go 1.17 golang.org/x/crypto/chacha20 golang.org/x/crypto/chacha20poly1305 @@ -600,13 +628,12 @@ golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 golang.org/x/crypto/pkcs12 golang.org/x/crypto/pkcs12/internal/rc2 -# golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 +# golang.org/x/exp v0.0.0-20231006140011-7918f672742d => golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 ## explicit; go 1.20 
golang.org/x/exp/constraints golang.org/x/exp/slices -# golang.org/x/net v0.14.0 +# golang.org/x/net v0.17.0 ## explicit; go 1.17 -golang.org/x/net/context golang.org/x/net/http/httpguts golang.org/x/net/http/httpproxy golang.org/x/net/http2 @@ -616,27 +643,28 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace -# golang.org/x/oauth2 v0.11.0 +# golang.org/x/oauth2 v0.13.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler golang.org/x/oauth2/clientcredentials golang.org/x/oauth2/google golang.org/x/oauth2/google/internal/externalaccount +golang.org/x/oauth2/google/internal/externalaccountauthorizeduser +golang.org/x/oauth2/google/internal/stsexchange golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.3.0 +# golang.org/x/sync v0.4.0 ## explicit; go 1.17 golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.11.0 +# golang.org/x/sys v0.13.0 ## explicit; go 1.17 golang.org/x/sys/cpu -golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/text v0.12.0 +# golang.org/x/text v0.13.0 ## explicit; go 1.17 golang.org/x/text/secure/bidirule golang.org/x/text/transform @@ -645,11 +673,11 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate -# golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 -## explicit; go 1.17 +# golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 +## explicit; go 1.18 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/api v0.138.0 +# google.golang.org/api v0.149.0 ## explicit; go 1.19 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport @@ -667,7 +695,7 @@ google.golang.org/api/transport google.golang.org/api/transport/grpc google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation -# google.golang.org/appengine v1.6.7 +# google.golang.org/appengine v1.6.8 ## explicit; go 1.11 google.golang.org/appengine google.golang.org/appengine/internal @@ -677,26 +705,24 @@ google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api -google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch -google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d +# google.golang.org/genproto v0.0.0-20231030173426-d783a09b4405 ## explicit; go 1.19 google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/internal -# google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d +# google.golang.org/genproto/googleapis/api v0.0.0-20231030173426-d783a09b4405 ## explicit; go 1.19 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations -# google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d +# google.golang.org/genproto/googleapis/rpc v0.0.0-20231030173426-d783a09b4405 ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.57.0 -## explicit; go 1.17 +# google.golang.org/grpc v1.59.0 +## explicit; go 1.19 google.golang.org/grpc google.golang.org/grpc/attributes 
google.golang.org/grpc/backoff @@ -738,6 +764,7 @@ google.golang.org/grpc/internal/grpclog google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil +google.golang.org/grpc/internal/idle google.golang.org/grpc/internal/metadata google.golang.org/grpc/internal/pretty google.golang.org/grpc/internal/resolver @@ -753,6 +780,7 @@ google.golang.org/grpc/keepalive google.golang.org/grpc/metadata google.golang.org/grpc/peer google.golang.org/grpc/resolver +google.golang.org/grpc/resolver/manual google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status @@ -799,3 +827,5 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 +# github.com/VictoriaMetrics/metricsql => github.com/VictoriaMetrics/metricsql v0.56.2 +# golang.org/x/exp => golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1